Diffstat (limited to 'library')
-rw-r--r--  library/alloc/benches/btree/map.rs | 1
-rw-r--r--  library/alloc/benches/vec.rs | 2
-rw-r--r--  library/alloc/src/alloc.rs | 8
-rw-r--r--  library/alloc/src/borrow.rs | 5
-rw-r--r--  library/alloc/src/boxed.rs | 50
-rw-r--r--  library/alloc/src/collections/binary_heap/mod.rs | 91
-rw-r--r--  library/alloc/src/collections/btree/map.rs | 128
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs | 67
-rw-r--r--  library/alloc/src/collections/btree/mod.rs | 1
-rw-r--r--  library/alloc/src/collections/btree/navigate.rs | 12
-rw-r--r--  library/alloc/src/collections/btree/set.rs | 55
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs | 1
-rw-r--r--  library/alloc/src/collections/linked_list.rs | 38
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs | 40
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs | 30
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs | 33
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs | 32
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs | 49
-rw-r--r--  library/alloc/src/lib.rs | 49
-rw-r--r--  library/alloc/src/raw_vec.rs | 1
-rw-r--r--  library/alloc/src/rc.rs | 44
-rw-r--r--  library/alloc/src/rc/tests.rs | 15
-rw-r--r--  library/alloc/src/str.rs | 4
-rw-r--r--  library/alloc/src/string.rs | 14
-rw-r--r--  library/alloc/src/sync.rs | 60
-rw-r--r--  library/alloc/src/tests.rs | 24
-rw-r--r--  library/alloc/src/vec/cow.rs | 1
-rw-r--r--  library/alloc/src/vec/into_iter.rs | 35
-rw-r--r--  library/alloc/src/vec/mod.rs | 20
-rw-r--r--  library/alloc/tests/boxed.rs | 15
-rw-r--r--  library/alloc/tests/lib.rs | 6
-rw-r--r--  library/alloc/tests/str.rs | 26
-rw-r--r--  library/alloc/tests/vec.rs | 22
-rw-r--r--  library/alloc/tests/vec_deque.rs | 23
-rw-r--r--  library/core/benches/lib.rs | 1
-rw-r--r--  library/core/benches/tuple.rs | 22
-rw-r--r--  library/core/src/alloc/mod.rs | 6
-rw-r--r--  library/core/src/array/iter.rs | 15
-rw-r--r--  library/core/src/cell.rs | 105
-rw-r--r--  library/core/src/cell/lazy.rs | 106
-rw-r--r--  library/core/src/cell/once.rs | 52
-rw-r--r--  library/core/src/cmp.rs | 55
-rw-r--r--  library/core/src/convert/mod.rs | 3
-rw-r--r--  library/core/src/convert/num.rs | 26
-rw-r--r--  library/core/src/error.rs | 2
-rw-r--r--  library/core/src/ffi/c_str.rs | 5
-rw-r--r--  library/core/src/ffi/mod.rs | 3
-rw-r--r--  library/core/src/fmt/mod.rs | 32
-rw-r--r--  library/core/src/fmt/rt/v1.rs | 6
-rw-r--r--  library/core/src/future/mod.rs | 6
-rw-r--r--  library/core/src/hash/mod.rs | 4
-rw-r--r--  library/core/src/hint.rs | 2
-rw-r--r--  library/core/src/intrinsics.rs | 265
-rw-r--r--  library/core/src/intrinsics/mir.rs | 34
-rw-r--r--  library/core/src/iter/adapters/by_ref_sized.rs | 5
-rw-r--r--  library/core/src/iter/adapters/chain.rs | 73
-rw-r--r--  library/core/src/iter/adapters/cloned.rs | 14
-rw-r--r--  library/core/src/iter/adapters/copied.rs | 19
-rw-r--r--  library/core/src/iter/adapters/cycle.rs | 26
-rw-r--r--  library/core/src/iter/adapters/enumerate.rs | 36
-rw-r--r--  library/core/src/iter/adapters/flatten.rs | 39
-rw-r--r--  library/core/src/iter/adapters/fuse.rs | 15
-rw-r--r--  library/core/src/iter/adapters/rev.rs | 19
-rw-r--r--  library/core/src/iter/adapters/skip.rs | 54
-rw-r--r--  library/core/src/iter/adapters/take.rs | 34
-rw-r--r--  library/core/src/iter/mod.rs | 2
-rw-r--r--  library/core/src/iter/range.rs | 25
-rw-r--r--  library/core/src/iter/sources/repeat.rs | 5
-rw-r--r--  library/core/src/iter/sources/repeat_n.rs | 8
-rw-r--r--  library/core/src/iter/sources/repeat_with.rs | 1
-rw-r--r--  library/core/src/iter/traits/accum.rs | 40
-rw-r--r--  library/core/src/iter/traits/double_ended.rs | 26
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 45
-rw-r--r--  library/core/src/lib.rs | 52
-rw-r--r--  library/core/src/macros/mod.rs | 8
-rw-r--r--  library/core/src/marker.rs | 26
-rw-r--r--  library/core/src/mem/maybe_uninit.rs | 16
-rw-r--r--  library/core/src/mem/transmutability.rs | 4
-rw-r--r--  library/core/src/net/parser.rs | 2
-rw-r--r--  library/core/src/num/dec2flt/common.rs | 179
-rw-r--r--  library/core/src/num/dec2flt/decimal.rs | 65
-rw-r--r--  library/core/src/num/dec2flt/float.rs | 4
-rw-r--r--  library/core/src/num/dec2flt/lemire.rs | 2
-rw-r--r--  library/core/src/num/dec2flt/mod.rs | 18
-rw-r--r--  library/core/src/num/dec2flt/number.rs | 1
-rw-r--r--  library/core/src/num/dec2flt/parse.rs | 224
-rw-r--r--  library/core/src/num/f32.rs | 2
-rw-r--r--  library/core/src/num/f64.rs | 2
-rw-r--r--  library/core/src/num/mod.rs | 2
-rw-r--r--  library/core/src/num/nonzero.rs | 23
-rw-r--r--  library/core/src/num/shells/u16.rs | 2
-rw-r--r--  library/core/src/num/uint_macros.rs | 9
-rw-r--r--  library/core/src/ops/index_range.rs | 15
-rw-r--r--  library/core/src/ops/try_trait.rs | 9
-rw-r--r--  library/core/src/option.rs | 110
-rw-r--r--  library/core/src/panic/panic_info.rs | 8
-rw-r--r--  library/core/src/panic/unwind_safe.rs | 2
-rw-r--r--  library/core/src/panicking.rs | 21
-rw-r--r--  library/core/src/pin.rs | 35
-rw-r--r--  library/core/src/primitive_docs.rs | 84
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 4
-rw-r--r--  library/core/src/ptr/mod.rs | 331
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 4
-rw-r--r--  library/core/src/ptr/non_null.rs | 13
-rw-r--r--  library/core/src/result.rs | 71
-rw-r--r--  library/core/src/slice/index.rs | 14
-rw-r--r--  library/core/src/slice/iter.rs | 4
-rw-r--r--  library/core/src/slice/iter/macros.rs | 22
-rw-r--r--  library/core/src/slice/mod.rs | 30
-rw-r--r--  library/core/src/slice/sort.rs | 2
-rw-r--r--  library/core/src/str/iter.rs | 4
-rw-r--r--  library/core/src/str/mod.rs | 11
-rw-r--r--  library/core/src/str/pattern.rs | 2
-rw-r--r--  library/core/src/str/traits.rs | 72
-rw-r--r--  library/core/src/sync/atomic.rs | 58
-rw-r--r--  library/core/src/sync/exclusive.rs | 3
-rw-r--r--  library/core/src/tuple.rs | 48
-rw-r--r--  library/core/src/unicode/unicode_data.rs | 2577
-rw-r--r--  library/core/tests/array.rs | 19
-rw-r--r--  library/core/tests/fmt/mod.rs | 8
-rw-r--r--  library/core/tests/iter/adapters/chain.rs | 41
-rw-r--r--  library/core/tests/iter/adapters/enumerate.rs | 15
-rw-r--r--  library/core/tests/iter/adapters/flatten.rs | 46
-rw-r--r--  library/core/tests/iter/adapters/skip.rs | 14
-rw-r--r--  library/core/tests/iter/adapters/take.rs | 15
-rw-r--r--  library/core/tests/iter/range.rs | 19
-rw-r--r--  library/core/tests/iter/traits/iterator.rs | 18
-rw-r--r--  library/core/tests/lib.rs | 3
-rw-r--r--  library/core/tests/num/dec2flt/mod.rs | 11
-rw-r--r--  library/core/tests/num/dec2flt/parse.rs | 2
-rw-r--r--  library/core/tests/slice.rs | 21
-rw-r--r--  library/panic_abort/src/android.rs | 4
-rw-r--r--  library/panic_abort/src/lib.rs | 2
-rw-r--r--  library/panic_unwind/src/lib.rs | 4
-rw-r--r--  library/portable-simd/crates/core_simd/src/masks/full_masks.rs | 2
-rw-r--r--  library/proc_macro/src/bridge/fxhash.rs | 2
-rw-r--r--  library/proc_macro/src/lib.rs | 6
-rw-r--r--  library/std/Cargo.toml | 4
-rw-r--r--  library/std/build.rs | 3
-rw-r--r--  library/std/src/collections/hash/map.rs | 1
-rw-r--r--  library/std/src/collections/hash/set.rs | 8
-rw-r--r--  library/std/src/collections/mod.rs | 11
-rw-r--r--  library/std/src/env.rs | 18
-rw-r--r--  library/std/src/f32.rs | 36
-rw-r--r--  library/std/src/f32/tests.rs | 16
-rw-r--r--  library/std/src/f64.rs | 36
-rw-r--r--  library/std/src/f64/tests.rs | 16
-rw-r--r--  library/std/src/ffi/os_str.rs | 1
-rw-r--r--  library/std/src/fs/tests.rs | 20
-rw-r--r--  library/std/src/io/buffered/bufwriter.rs | 2
-rw-r--r--  library/std/src/io/cursor.rs | 2
-rw-r--r--  library/std/src/io/error.rs | 13
-rw-r--r--  library/std/src/io/mod.rs | 20
-rw-r--r--  library/std/src/io/stdio.rs | 27
-rw-r--r--  library/std/src/keyword_docs.rs | 2
-rw-r--r--  library/std/src/lib.rs | 31
-rw-r--r--  library/std/src/net/tcp.rs | 12
-rw-r--r--  library/std/src/net/tcp/tests.rs | 28
-rw-r--r--  library/std/src/os/android/net.rs | 4
-rw-r--r--  library/std/src/os/fd/owned.rs | 4
-rw-r--r--  library/std/src/os/linux/net.rs | 4
-rw-r--r--  library/std/src/os/linux/raw.rs | 1
-rw-r--r--  library/std/src/os/net/linux_ext/addr.rs | 6
-rw-r--r--  library/std/src/os/net/linux_ext/mod.rs | 2
-rw-r--r--  library/std/src/os/unix/net/addr.rs | 4
-rw-r--r--  library/std/src/os/unix/net/ancillary.rs | 141
-rw-r--r--  library/std/src/os/unix/net/datagram.rs | 45
-rw-r--r--  library/std/src/os/unix/net/listener.rs | 3
-rw-r--r--  library/std/src/os/unix/net/stream.rs | 39
-rw-r--r--  library/std/src/os/unix/net/tests.rs | 2
-rw-r--r--  library/std/src/os/windows/io/handle.rs | 2
-rw-r--r--  library/std/src/panic.rs | 3
-rw-r--r--  library/std/src/panicking.rs | 16
-rw-r--r--  library/std/src/path.rs | 48
-rw-r--r--  library/std/src/personality/gcc.rs | 3
-rw-r--r--  library/std/src/prelude/mod.rs | 6
-rw-r--r--  library/std/src/primitive_docs.rs | 84
-rw-r--r--  library/std/src/process.rs | 86
-rw-r--r--  library/std/src/process/tests.rs | 34
-rw-r--r--  library/std/src/sync/lazy_lock.rs | 24
-rw-r--r--  library/std/src/sync/mod.rs | 9
-rw-r--r--  library/std/src/sync/mpmc/array.rs | 107
-rw-r--r--  library/std/src/sync/mpmc/list.rs | 12
-rw-r--r--  library/std/src/sync/mpmc/mod.rs | 4
-rw-r--r--  library/std/src/sync/mpsc/sync_tests.rs | 13
-rw-r--r--  library/std/src/sync/mutex.rs | 24
-rw-r--r--  library/std/src/sync/once_lock.rs | 59
-rw-r--r--  library/std/src/sync/remutex.rs | 2
-rw-r--r--  library/std/src/sys/common/alloc.rs | 1
-rw-r--r--  library/std/src/sys/common/mod.rs | 1
-rw-r--r--  library/std/src/sys/common/thread_local/fast_local.rs | 254
-rw-r--r--  library/std/src/sys/common/thread_local/mod.rs | 109
-rw-r--r--  library/std/src/sys/common/thread_local/os_local.rs | 197
-rw-r--r--  library/std/src/sys/common/thread_local/static_local.rs | 115
-rw-r--r--  library/std/src/sys/hermit/fs.rs | 2
-rw-r--r--  library/std/src/sys/hermit/futex.rs | 6
-rw-r--r--  library/std/src/sys/hermit/mod.rs | 14
-rw-r--r--  library/std/src/sys/hermit/net.rs | 31
-rw-r--r--  library/std/src/sys/mod.rs | 9
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/alloc.rs | 1
-rw-r--r--  library/std/src/sys/sgx/fd.rs | 6
-rw-r--r--  library/std/src/sys/sgx/net.rs | 6
-rw-r--r--  library/std/src/sys/solid/net.rs | 23
-rw-r--r--  library/std/src/sys/unix/fd.rs | 9
-rw-r--r--  library/std/src/sys/unix/fs.rs | 9
-rw-r--r--  library/std/src/sys/unix/futex.rs | 2
-rw-r--r--  library/std/src/sys/unix/kernel_copy.rs | 62
-rw-r--r--  library/std/src/sys/unix/kernel_copy/tests.rs | 42
-rw-r--r--  library/std/src/sys/unix/net.rs | 39
-rw-r--r--  library/std/src/sys/unix/os.rs | 6
-rw-r--r--  library/std/src/sys/unix/pipe.rs | 6
-rw-r--r--  library/std/src/sys/unix/process/process_fuchsia.rs | 2
-rw-r--r--  library/std/src/sys/unix/rand.rs | 5
-rw-r--r--  library/std/src/sys/unix/stdio.rs | 6
-rw-r--r--  library/std/src/sys/unix/time.rs | 50
-rw-r--r--  library/std/src/sys/unsupported/net.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/pipe.rs | 6
-rw-r--r--  library/std/src/sys/wasi/fd.rs | 18
-rw-r--r--  library/std/src/sys/wasi/fs.rs | 2
-rw-r--r--  library/std/src/sys/wasi/net.rs | 6
-rw-r--r--  library/std/src/sys/windows/args.rs | 23
-rw-r--r--  library/std/src/sys/windows/c.rs | 87
-rw-r--r--  library/std/src/sys/windows/c/errors.rs | 2
-rw-r--r--  library/std/src/sys/windows/fs.rs | 46
-rw-r--r--  library/std/src/sys/windows/handle.rs | 9
-rw-r--r--  library/std/src/sys/windows/net.rs | 28
-rw-r--r--  library/std/src/sys/windows/path.rs | 65
-rw-r--r--  library/std/src/sys/windows/pipe.rs | 24
-rw-r--r--  library/std/src/sys/windows/process.rs | 12
-rw-r--r--  library/std/src/sys_common/net.rs | 7
-rw-r--r--  library/std/src/sys_common/thread_parking/id.rs | 2
-rw-r--r--  library/std/src/thread/local.rs | 567
-rw-r--r--  library/std/src/thread/mod.rs | 40
-rw-r--r--  library/stdarch/ci/docker/wasm32-wasi/Dockerfile | 7
-rw-r--r--  library/stdarch/crates/core_arch/src/aarch64/armclang.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs | 2372
-rw-r--r--  library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs | 158
-rw-r--r--  library/stdarch/crates/core_arch/src/aarch64/prefetch.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/aarch64/tme.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/arm/armclang.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/arm/mod.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/arm/neon.rs | 88
-rw-r--r--  library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs | 2146
-rw-r--r--  library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs | 336
-rw-r--r--  library/stdarch/crates/core_arch/src/core_arch_docs.md | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/lib.rs | 6
-rw-r--r--  library/stdarch/crates/core_arch/src/macros.rs | 202
-rw-r--r--  library/stdarch/crates/core_arch/src/mips/msa.rs | 295
-rw-r--r--  library/stdarch/crates/core_arch/src/mips/msa/macros.rs | 31
-rw-r--r--  library/stdarch/crates/core_arch/src/mod.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/powerpc/vsx.rs | 10
-rw-r--r--  library/stdarch/crates/core_arch/src/riscv_shared/mod.rs | 12
-rw-r--r--  library/stdarch/crates/core_arch/src/wasm32/memory.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/wasm32/mod.rs | 3
-rw-r--r--  library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs | 449
-rw-r--r--  library/stdarch/crates/core_arch/src/wasm32/simd128.rs | 242
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/abm.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/aes.rs | 14
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx.rs | 520
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx2.rs | 608
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs | 48
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512bw.rs | 1726
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512cd.rs | 84
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512f.rs | 6210
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512ifma.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512vbmi.rs | 58
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs | 408
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512vnni.rs | 72
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/avx512vpopcntdq.rs | 36
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/bmi1.rs | 16
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/bmi2.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/bswap.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/cpuid.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/eflags.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/f16c.rs | 10
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/fma.rs | 64
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/fxsr.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/gfni.rs | 94
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/macros.rs | 75
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/rdrand.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/rdtsc.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/rtm.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sha.rs | 16
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse.rs | 248
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse2.rs | 510
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse3.rs | 28
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse41.rs | 174
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/sse42.rs | 64
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/ssse3.rs | 38
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/test.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/vaes.rs | 16
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/x86/xsave.rs | 16
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/abm.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/avx.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/avx2.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/avx512f.rs | 60
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/bmi.rs | 16
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/bmi2.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/bswap.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs | 24
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/fxsr.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/macros.rs | 28
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/rdrand.rs | 4
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/sse.rs | 6
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/sse2.rs | 22
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/sse41.rs | 8
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/sse42.rs | 2
-rw-r--r--  library/stdarch/crates/core_arch/src/x86_64/xsave.rs | 12
-rw-r--r--  library/stdarch/crates/core_arch/tests/cpu-detection.rs | 72
-rw-r--r--  library/stdarch/crates/intrinsic-test/Cargo.toml | 1
-rw-r--r--  library/stdarch/crates/intrinsic-test/LICENSE-APACHE | 201
-rw-r--r--  library/stdarch/crates/intrinsic-test/LICENSE-MIT | 25
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs | 48
-rw-r--r--  library/stdarch/crates/intrinsic-test/src/main.rs | 51
-rw-r--r--  library/stdarch/crates/std_detect/README.md | 18
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/arch/x86.rs | 6
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/mod.rs | 6
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/aarch64.rs | 112
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs | 18
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs | 22
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs | 47
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs | 22
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs | 55
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs | 36
-rw-r--r--  library/stdarch/crates/std_detect/src/detect/os/x86.rs | 3
-rw-r--r--  library/stdarch/crates/std_detect/src/lib.rs | 3
-rw-r--r--  library/stdarch/crates/std_detect/tests/cpu-detection.rs | 59
-rw-r--r--  library/stdarch/crates/std_detect/tests/x86-specific.rs | 4
-rw-r--r--  library/stdarch/crates/stdarch-gen/neon.spec | 310
-rw-r--r--  library/stdarch/crates/stdarch-gen/src/main.rs | 389
-rw-r--r--  library/stdarch/crates/stdarch-test/Cargo.toml | 2
-rw-r--r--  library/stdarch/crates/stdarch-test/src/lib.rs | 2
-rw-r--r--  library/stdarch/crates/stdarch-verify/x86-intel.xml | 80
-rw-r--r--  library/test/src/console.rs | 83
-rw-r--r--  library/test/src/formatters/json.rs | 52
-rw-r--r--  library/test/src/formatters/junit.rs | 14
-rw-r--r--  library/test/src/formatters/mod.rs | 6
-rw-r--r--  library/test/src/formatters/pretty.rs | 29
-rw-r--r--  library/test/src/formatters/terse.rs | 14
-rw-r--r--  library/test/src/lib.rs | 11
-rw-r--r--  library/test/src/tests.rs | 200
-rw-r--r--  library/test/src/types.rs | 10
-rw-r--r--  library/unwind/src/lib.rs | 16
-rw-r--r--  library/unwind/src/libunwind.rs | 3
346 files changed, 15771 insertions, 13811 deletions
diff --git a/library/alloc/benches/btree/map.rs b/library/alloc/benches/btree/map.rs
index 1f6b87fb0..ec1b0a8eb 100644
--- a/library/alloc/benches/btree/map.rs
+++ b/library/alloc/benches/btree/map.rs
@@ -1,5 +1,4 @@
use std::collections::BTreeMap;
-use std::iter::Iterator;
use std::ops::RangeBounds;
use std::vec::Vec;
diff --git a/library/alloc/benches/vec.rs b/library/alloc/benches/vec.rs
index 663f6b9dd..c1d3e1bdf 100644
--- a/library/alloc/benches/vec.rs
+++ b/library/alloc/benches/vec.rs
@@ -1,5 +1,5 @@
use rand::RngCore;
-use std::iter::{repeat, FromIterator};
+use std::iter::repeat;
use test::{black_box, Bencher};
#[bench]
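
Aside: many hunks in this diff, like the one above, drop `use std::iter::FromIterator;` (or `Iterator`, `TryFrom`, and friends). These traits are in the prelude, `FromIterator` since the 2021 edition, so the explicit imports are redundant. A minimal sketch (not part of the patch) showing `from_iter` resolving with no import:

    // Edition 2021: `FromIterator` comes in via the prelude, so no
    // `use std::iter::FromIterator;` is needed for `Vec::from_iter`.
    fn main() {
        let v = Vec::from_iter(std::iter::repeat(1u8).take(3));
        assert_eq!(v, [1, 1, 1]);
    }
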
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 3a797bd5e..6f2ba957b 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -14,8 +14,6 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
-use core::marker::Destruct;
-
#[cfg(test)]
mod tests;
@@ -331,16 +329,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
- ptr: Unique<T>,
- alloc: A,
-) {
+pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
diff --git a/library/alloc/src/borrow.rs b/library/alloc/src/borrow.rs
index 83a138559..0c8c796ae 100644
--- a/library/alloc/src/borrow.rs
+++ b/library/alloc/src/borrow.rs
@@ -328,10 +328,9 @@ impl<B: ?Sized + ToOwned> Cow<'_, B> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
-impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
+impl<B: ?Sized + ToOwned> Deref for Cow<'_, B>
where
- B::Owned: ~const Borrow<B>,
+ B::Owned: Borrow<B>,
{
type Target = B;
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 44a378990..7f88327bf 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -150,16 +150,13 @@ use core::any::Any;
use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
-use core::iter::{FusedIterator, Iterator};
+use core::iter::FusedIterator;
use core::marker::Tuple;
-use core::marker::{Destruct, Unpin, Unsize};
+use core::marker::Unsize;
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
@@ -214,6 +211,7 @@ impl<T> Box<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
+ #[rustc_diagnostic_item = "box_new"]
pub fn new(x: T) -> Self {
#[rustc_box]
Box::new(x)
@@ -375,12 +373,11 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
- pub const fn new_in(x: T, alloc: A) -> Self
+ pub fn new_in(x: T, alloc: A) -> Self
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
@@ -405,12 +402,10 @@ impl<T, A: Allocator> Box<T, A> {
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
- T: ~const Destruct,
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
@@ -440,13 +435,12 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
- pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -481,10 +475,9 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
@@ -512,13 +505,12 @@ impl<T, A: Allocator> Box<T, A> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -553,10 +545,9 @@ impl<T, A: Allocator> Box<T, A> {
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
@@ -572,12 +563,11 @@ impl<T, A: Allocator> Box<T, A> {
/// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
- pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
where
- A: 'static + ~const Allocator + ~const Destruct,
+ A: 'static + Allocator,
{
Self::into_pin(Self::new_in(x, alloc))
}
@@ -604,12 +594,8 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(Box::into_inner(c), 5);
/// ```
#[unstable(feature = "box_into_inner", issue = "80437")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_inner(boxed: Self) -> T
- where
- Self: ~const Destruct,
- {
+ pub fn into_inner(boxed: Self) -> T {
*boxed
}
}
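
Aside: the `boxed.rs` hunks above un-const the allocator-aware constructors, replacing the experimental `~const Allocator + ~const Destruct` bounds with plain `A: Allocator`. A nightly-only sketch (assuming the unstable `allocator_api` and `box_into_inner` features) of a call site after this change:

    #![feature(allocator_api, box_into_inner)]
    use std::alloc::Global;

    fn main() {
        // `new_in` is now an ordinary runtime fn bounded by `A: Allocator`.
        let five = Box::new_in(5, Global);
        assert_eq!(Box::into_inner(five), 5);
    }
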
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
index f1d0a305d..2c089bb31 100644
--- a/library/alloc/src/collections/binary_heap/mod.rs
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -144,7 +144,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use core::fmt;
-use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
@@ -154,8 +154,6 @@ use crate::collections::TryReserveError;
use crate::slice;
use crate::vec::{self, AsVecIntoIter, Vec};
-use super::SpecExtend;
-
#[cfg(test)]
mod tests;
@@ -265,7 +263,6 @@ mod tests;
/// more detailed analysis.
///
/// [`core::cmp::Reverse`]: core::cmp::Reverse
-/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
/// [push]: BinaryHeap::push
@@ -400,6 +397,17 @@ impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
}
}
+struct RebuildOnDrop<'a, T: Ord> {
+ heap: &'a mut BinaryHeap<T>,
+ rebuild_from: usize,
+}
+
+impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> {
+ fn drop(&mut self) {
+ self.heap.rebuild_tail(self.rebuild_from);
+ }
+}
+
impl<T: Ord> BinaryHeap<T> {
/// Creates an empty `BinaryHeap` as a max-heap.
///
@@ -837,7 +845,6 @@ impl<T: Ord> BinaryHeap<T> {
/// Basic usage:
///
/// ```
- /// #![feature(binary_heap_retain)]
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
@@ -846,35 +853,24 @@ impl<T: Ord> BinaryHeap<T> {
///
/// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
/// ```
- #[unstable(feature = "binary_heap_retain", issue = "71503")]
+ #[stable(feature = "binary_heap_retain", since = "1.70.0")]
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
- struct RebuildOnDrop<'a, T: Ord> {
- heap: &'a mut BinaryHeap<T>,
- first_removed: usize,
- }
-
- let mut guard = RebuildOnDrop { first_removed: self.len(), heap: self };
-
+ // rebuild_from will be updated to the first touched element below, and the rebuild will
+ // only be done for the tail.
+ let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
let mut i = 0;
+
guard.heap.data.retain(|e| {
let keep = f(e);
- if !keep && i < guard.first_removed {
- guard.first_removed = i;
+ if !keep && i < guard.rebuild_from {
+ guard.rebuild_from = i;
}
i += 1;
keep
});
-
- impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> {
- fn drop(&mut self) {
- // data[..first_removed] is untouched, so we only need to
- // rebuild the tail:
- self.heap.rebuild_tail(self.first_removed);
- }
- }
}
}
@@ -1421,7 +1417,6 @@ impl<T> FusedIterator for Iter<'_, T> {}
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<T> {
@@ -1468,6 +1463,20 @@ impl<T> ExactSizeIterator for IntoIter<T> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for IntoIter<T> {
+ /// Creates an empty `binary_heap::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::binary_heap;
+ /// let iter: binary_heap::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoIter { iter: Default::default() }
+ }
+}
+
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
@@ -1715,7 +1724,8 @@ impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
impl<T: Ord> Extend<T> for BinaryHeap<T> {
#[inline]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- <Self as SpecExtend<I>>::spec_extend(self, iter);
+ let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
+ guard.heap.data.extend(iter);
}
#[inline]
@@ -1729,37 +1739,6 @@ impl<T: Ord> Extend<T> for BinaryHeap<T> {
}
}
-impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
- default fn spec_extend(&mut self, iter: I) {
- self.extend_desugared(iter.into_iter());
- }
-}
-
-impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: Vec<T>) {
- let start = self.data.len();
- self.data.append(other);
- self.rebuild_tail(start);
- }
-}
-
-impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
- self.append(other);
- }
-}
-
-impl<T: Ord> BinaryHeap<T> {
- fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- let iterator = iter.into_iter();
- let (lower, _) = iterator.size_hint();
-
- self.reserve(lower);
-
- iterator.for_each(move |elem| self.push(elem));
- }
-}
-
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
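
Aside: `retain` and the simplified `Extend` impl above both use the hoisted `RebuildOnDrop` guard. Because the fix-up runs in `Drop`, the heap property is restored even if the caller's predicate or iterator panics partway through. A standalone sketch of the pattern (hypothetical names, with a sorted `Vec` tail standing in for `rebuild_tail`):

    struct RebuildOnDrop<'a> {
        data: &'a mut Vec<i32>,
        rebuild_from: usize,
    }

    impl Drop for RebuildOnDrop<'_> {
        // Runs on normal exit and during unwinding alike, so the
        // invariant (here: a sorted tail) is restored either way.
        fn drop(&mut self) {
            let from = self.rebuild_from;
            self.data[from..].sort_unstable();
        }
    }

    fn main() {
        let mut v = vec![1, 2, 3, 9, 8, 7];
        {
            let guard = RebuildOnDrop { rebuild_from: 3, data: &mut v };
            guard.data[3..].reverse(); // could just as well have panicked here
        } // guard dropped: the tail is rebuilt regardless
        assert_eq!(v, [1, 2, 3, 7, 8, 9]);
    }
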
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 386cd1a16..afdc99817 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -3,7 +3,7 @@ use core::borrow::Borrow;
use core::cmp::Ordering;
use core::fmt::{self, Debug};
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, FusedIterator};
+use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop};
use core::ops::{Bound, Index, RangeBounds};
@@ -362,6 +362,20 @@ impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<'a, K: 'a, V: 'a> Default for Iter<'a, K, V> {
+ /// Creates an empty `btree_map::Iter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Iter<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Iter { range: Default::default(), length: 0 }
+ }
+}
+
/// A mutable iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its
@@ -386,13 +400,26 @@ impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<'a, K: 'a, V: 'a> Default for IterMut<'a, K, V> {
+ /// Creates an empty `btree_map::IterMut`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IterMut<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IterMut { range: Default::default(), length: 0, _marker: PhantomData {} }
+ }
+}
+
/// An owning iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: IntoIterator::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct IntoIter<
@@ -421,6 +448,23 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for IntoIter<K, V, A> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V, A> Default for IntoIter<K, V, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_map::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IntoIter<u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoIter { range: Default::default(), length: 0, alloc: Default::default() }
+ }
+}
+
/// An iterator over the keys of a `BTreeMap`.
///
/// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its
@@ -605,7 +649,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
// avoid moving the allocator
- mem::drop(BTreeMap {
+ drop(BTreeMap {
root: mem::replace(&mut self.root, None),
length: mem::replace(&mut self.length, 0),
alloc: self.alloc.clone(),
@@ -1768,6 +1812,20 @@ impl<K, V> Clone for Keys<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V> Default for Keys<'_, K, V> {
+ /// Creates an empty `btree_map::Keys`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Keys<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Keys { inner: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
@@ -1809,6 +1867,20 @@ impl<K, V> Clone for Values<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V> Default for Values<'_, K, V> {
+ /// Creates an empty `btree_map::Values`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Values<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Values { inner: Default::default() }
+ }
+}
+
/// An iterator produced by calling `drain_filter` on BTreeMap.
#[unstable(feature = "btree_drain_filter", issue = "70530")]
pub struct DrainFilter<
@@ -1945,6 +2017,20 @@ impl<'a, K, V> Iterator for Range<'a, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V> Default for Range<'_, K, V> {
+ /// Creates an empty `btree_map::Range`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Range<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.count(), 0);
+ /// ```
+ fn default() -> Self {
+ Range { inner: Default::default() }
+ }
+}
+
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
type Item = &'a mut V;
@@ -2021,6 +2107,23 @@ impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> {
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V, A> Default for IntoKeys<K, V, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_map::IntoKeys`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IntoKeys<u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoKeys { inner: Default::default() }
+ }
+}
+
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
type Item = V;
@@ -2055,6 +2158,23 @@ impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> {
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V, A> Default for IntoValues<K, V, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_map::IntoValues`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IntoValues<u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoValues { inner: Default::default() }
+ }
+}
+
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
@@ -3062,7 +3182,7 @@ impl<'a, K: Ord, V, A: Allocator + Clone> CursorMut<'a, K, V, A> {
panic!("key must be ordered above the current element");
}
}
- if let Some((next, _)) = self.peek_prev() {
+ if let Some((next, _)) = self.peek_next() {
if &key >= next {
panic!("key must be ordered below the next element");
}
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 76c2f27b4..da00d83bd 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -9,8 +9,7 @@ use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
-use std::convert::TryFrom;
-use std::iter::{self, FromIterator};
+use std::iter;
use std::mem;
use std::ops::Bound::{self, Excluded, Included, Unbounded};
use std::ops::RangeBounds;
@@ -2385,3 +2384,67 @@ fn test_cursor_mut() {
assert_eq!(cur.key(), Some(&4));
assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd')]));
}
+
+#[should_panic(expected = "key must be ordered above the previous element")]
+#[test]
+fn test_cursor_mut_insert_before_1() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(0, 'd');
+}
+
+#[should_panic(expected = "key must be ordered above the previous element")]
+#[test]
+fn test_cursor_mut_insert_before_2() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(1, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the current element")]
+#[test]
+fn test_cursor_mut_insert_before_3() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(2, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the current element")]
+#[test]
+fn test_cursor_mut_insert_before_4() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(3, 'd');
+}
+
+#[should_panic(expected = "key must be ordered above the current element")]
+#[test]
+fn test_cursor_mut_insert_after_1() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(1, 'd');
+}
+
+#[should_panic(expected = "key must be ordered above the current element")]
+#[test]
+fn test_cursor_mut_insert_after_2() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(2, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the next element")]
+#[test]
+fn test_cursor_mut_insert_after_3() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(3, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the next element")]
+#[test]
+fn test_cursor_mut_insert_after_4() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(4, 'd');
+}
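
Aside: these tests pin down the panic conditions of the unstable `btree_cursors` API: `insert_before` and `insert_after` must keep the map ordered around the cursor, and the `peek_prev`/`peek_next` fix above is exactly what the "below the next element" cases exercise. A nightly-only sketch (not part of the patch) of the non-panicking case:

    #![feature(btree_cursors)]
    use std::collections::BTreeMap;
    use std::ops::Bound;

    fn main() {
        let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (4, 'd')]);
        // The cursor points at the entry for 2 (greatest key <= the bound).
        let mut cur = map.upper_bound_mut(Bound::Included(&2));
        // 2 < 3 < 4, so the insert keeps the order and does not panic.
        cur.insert_after(3, 'c');
        assert_eq!(map, BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]));
    }
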
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
index 7552f2fc0..c7d0144de 100644
--- a/library/alloc/src/collections/btree/mod.rs
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -13,7 +13,6 @@ pub mod set;
mod set_val;
mod split;
-#[doc(hidden)]
trait Recover<Q: ?Sized> {
type Key;
diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs
index b890717e5..a85a31624 100644
--- a/library/alloc/src/collections/btree/navigate.rs
+++ b/library/alloc/src/collections/btree/navigate.rs
@@ -19,6 +19,12 @@ impl<'a, K: 'a, V: 'a> Clone for LeafRange<marker::Immut<'a>, K, V> {
}
}
+impl<B, K, V> Default for LeafRange<B, K, V> {
+ fn default() -> Self {
+ LeafRange { front: None, back: None }
+ }
+}
+
impl<BorrowType, K, V> LeafRange<BorrowType, K, V> {
pub fn none() -> Self {
LeafRange { front: None, back: None }
@@ -124,6 +130,12 @@ pub struct LazyLeafRange<BorrowType, K, V> {
back: Option<LazyLeafHandle<BorrowType, K, V>>,
}
+impl<B, K, V> Default for LazyLeafRange<B, K, V> {
+ fn default() -> Self {
+ LazyLeafRange { front: None, back: None }
+ }
+}
+
impl<'a, K: 'a, V: 'a> Clone for LazyLeafRange<marker::Immut<'a>, K, V> {
fn clone(&self) -> Self {
LazyLeafRange { front: self.front.clone(), back: self.back.clone() }
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index 4ddb21192..da952a13f 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -1,13 +1,10 @@
-// This is pretty much entirely stolen from TreeSet, since BTreeMap has an identical interface
-// to TreeMap
-
use crate::vec::Vec;
use core::borrow::Borrow;
use core::cmp::Ordering::{self, Equal, Greater, Less};
use core::cmp::{max, min};
use core::fmt::{self, Debug};
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, FusedIterator, Peekable};
+use core::iter::{FusedIterator, Peekable};
use core::mem::ManuallyDrop;
use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
@@ -18,8 +15,6 @@ use super::Recover;
use crate::alloc::{Allocator, Global};
-// FIXME(conventions): implement bounded iterators
-
/// An ordered set based on a B-Tree.
///
/// See [`BTreeMap`]'s documentation for a detailed discussion of this collection's performance
@@ -35,7 +30,6 @@ use crate::alloc::{Allocator, Global};
/// Iterators returned by [`BTreeSet::iter`] produce their items in order, and take worst-case
/// logarithmic and amortized constant time per item returned.
///
-/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
///
@@ -152,7 +146,6 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BTreeSet#method.into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IntoIter<
@@ -1544,6 +1537,21 @@ impl<T, A: Allocator + Clone> Iterator for IntoIter<T, A> {
self.iter.size_hint()
}
}
+
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for Iter<'_, T> {
+ /// Creates an empty `btree_set::Iter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_set;
+ /// let iter: btree_set::Iter<'_, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Iter { iter: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator + Clone> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
@@ -1560,6 +1568,23 @@ impl<T, A: Allocator + Clone> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator + Clone> FusedIterator for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_set::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_set;
+ /// let iter: btree_set::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoIter { iter: Default::default() }
+ }
+}
+
#[stable(feature = "btree_range", since = "1.17.0")]
impl<T> Clone for Range<'_, T> {
fn clone(&self) -> Self {
@@ -1598,6 +1623,20 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Range<'_, T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for Range<'_, T> {
+ /// Creates an empty `btree_set::Range`.
+ ///
+ /// ```
+ /// # use std::collections::btree_set;
+ /// let iter: btree_set::Range<'_, u8> = Default::default();
+ /// assert_eq!(iter.count(), 0);
+ /// ```
+ fn default() -> Self {
+ Range { iter: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator + Clone> Clone for Difference<'_, T, A> {
fn clone(&self) -> Self {
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index 7b8d41a60..a7c839d77 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -4,7 +4,6 @@ use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
-use std::iter::FromIterator;
use std::ops::Bound::{Excluded, Included};
use std::panic::{catch_unwind, AssertUnwindSafe};
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index f2f5dffc2..106d05c57 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -15,7 +15,7 @@
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, FusedIterator};
+use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
@@ -130,7 +130,6 @@ impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: LinkedList::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
@@ -1075,6 +1074,20 @@ impl<T> ExactSizeIterator for Iter<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for Iter<'_, T> {
+ /// Creates an empty `linked_list::Iter`.
+ ///
+ /// ```
+ /// # use std::collections::linked_list;
+ /// let iter: linked_list::Iter<'_, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Iter { head: None, tail: None, len: 0, marker: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
@@ -1129,6 +1142,13 @@ impl<T> ExactSizeIterator for IterMut<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IterMut<'_, T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for IterMut<'_, T> {
+ fn default() -> Self {
+ IterMut { head: None, tail: None, len: 0, marker: Default::default() }
+ }
+}
+
/// A cursor over a `LinkedList`.
///
/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth.
@@ -1808,6 +1828,20 @@ impl<T> ExactSizeIterator for IntoIter<T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for IntoIter<T> {
+ /// Creates an empty `linked_list::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::linked_list;
+ /// let iter: linked_list::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ LinkedList::new().into_iter()
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for LinkedList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
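
Aside: the `Default` impls added across this patch (feature `default_iters`, stabilized in 1.70.0) make empty iterators constructible without a source collection, which also unlocks `mem::take` on iterator-typed fields. A small sketch:

    use std::collections::linked_list;

    fn main() {
        let mut iter: linked_list::IntoIter<u8> = Default::default();
        assert_eq!(iter.next(), None);
        // `mem::take` requires `Default`, so this now compiles too:
        let drained = std::mem::take(&mut iter);
        assert_eq!(drained.count(), 0);
    }
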
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 89feb361d..0be274a38 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -52,36 +52,22 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
}
}
- // Only returns pointers to the slices, as that's
- // all we need to drop them. May only be called if `self.remaining != 0`.
+ // Only returns pointers to the slices, as that's all we need
+ // to drop them. May only be called if `self.remaining != 0`.
unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
unsafe {
let deque = self.deque.as_ref();
- // FIXME: This is doing almost exactly the same thing as the else branch in `VecDeque::slice_ranges`.
- // Unfortunately, we can't just call `slice_ranges` here, as the deque's `len` is currently
- // just `drain_start`, so the range check would (almost) always panic. Between temporarily
- // adjusting the deques `len` to call `slice_ranges`, and just copy pasting the `slice_ranges`
- // implementation, this seemed like the less hacky solution, though it might be good to
- // find a better one in the future.
-
- // because `self.remaining != 0`, we know that `self.idx < deque.original_len`, so it's a valid
- // logical index.
- let wrapped_start = deque.to_physical_idx(self.idx);
-
- let head_len = deque.capacity() - wrapped_start;
-
- let (a_range, b_range) = if head_len >= self.remaining {
- (wrapped_start..wrapped_start + self.remaining, 0..0)
- } else {
- let tail_len = self.remaining - head_len;
- (wrapped_start..deque.capacity(), 0..tail_len)
- };
-
- // SAFETY: the range `self.idx..self.idx+self.remaining` lies strictly inside
- // the range `0..deque.original_len`. because of this, and because of the fact
- // that we acquire `a_range` and `b_range` exactly like `slice_ranges` would,
- // it's guaranteed that `a_range` and `b_range` represent valid ranges into
- // the deques buffer.
+
+ // We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
+ let logical_remaining_range = self.idx..self.idx + self.remaining;
+
+ // SAFETY: `logical_remaining_range` represents the
+ // range into the logical buffer of elements that
+ // haven't been drained yet, so they're all initialized,
+ // and `slice::range(start..end, end) == start..end`,
+ // so the preconditions for `slice_ranges` are met.
+ let (a_range, b_range) =
+ deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
(deque.buffer_range(a_range), deque.buffer_range(b_range))
}
}
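
Aside: the deleted branch and the reworked `slice_ranges` compute the same ring-buffer mapping: a logical range is shifted by the head position (`to_physical_idx`) and splits into at most two physical ranges when it wraps. A self-contained sketch of that mapping (hypothetical free function mirroring the deleted else-branch):

    use std::ops::Range;

    // `head` is the physical index of logical element 0; `cap` is the capacity.
    fn slice_ranges(head: usize, cap: usize, logical: Range<usize>) -> (Range<usize>, Range<usize>) {
        let wrapped_start = (head + logical.start) % cap; // to_physical_idx
        let len = logical.end - logical.start;
        let head_len = cap - wrapped_start;
        if head_len >= len {
            (wrapped_start..wrapped_start + len, 0..0) // contiguous
        } else {
            (wrapped_start..cap, 0..len - head_len) // wraps around
        }
    }

    fn main() {
        // Capacity 8, head at physical index 6: logical 0..4 wraps.
        assert_eq!(slice_ranges(6, 8, 0..4), (6..8, 0..2));
        assert_eq!(slice_ranges(6, 8, 0..2), (6..8, 0..0));
    }
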
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
index 34bc0ce91..d9e274df0 100644
--- a/library/alloc/src/collections/vec_deque/into_iter.rs
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -1,4 +1,5 @@
use core::iter::{FusedIterator, TrustedLen};
+use core::num::NonZeroUsize;
use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr};
use crate::alloc::{Allocator, Global};
@@ -11,7 +12,6 @@ use super::VecDeque;
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: VecDeque::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<
@@ -54,15 +54,16 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- if self.inner.len < n {
- let len = self.inner.len;
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let len = self.inner.len;
+ let rem = if len < n {
self.inner.clear();
- Err(len)
+ n - len
} else {
self.inner.drain(..n);
- Ok(())
- }
+ 0
+ };
+ NonZeroUsize::new(rem).map_or(Ok(()), Err)
}
#[inline]
@@ -182,15 +183,16 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let len = self.inner.len;
- if len >= n {
- self.inner.truncate(len - n);
- Ok(())
- } else {
+ let rem = if len < n {
self.inner.clear();
- Err(len)
- }
+ n - len
+ } else {
+ self.inner.truncate(len - n);
+ 0
+ };
+ NonZeroUsize::new(rem).map_or(Ok(()), Err)
}
fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
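
Aside: this file and the `iter.rs`/`iter_mut.rs` hunks below adapt to the revised unstable `iter_advance_by` contract: on failure, `Err` now carries the non-zero number of steps that could not be taken, rather than the number that were. A nightly-only sketch of the new contract:

    #![feature(iter_advance_by)]
    use std::num::NonZeroUsize;

    fn main() {
        let mut it = [1, 2, 3].into_iter();
        assert_eq!(it.advance_by(2), Ok(())); // 2 of 3 elements consumed
        // One element is left, so 4 of the requested 5 steps remain untaken.
        assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(4).unwrap()));
        assert_eq!(it.next(), None);
    }
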
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
index d9f393714..646a2a991 100644
--- a/library/alloc/src/collections/vec_deque/iter.rs
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -1,4 +1,5 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::num::NonZeroUsize;
use core::ops::Try;
use core::{fmt, mem, slice};
@@ -55,13 +56,15 @@ impl<'a, T> Iterator for Iter<'a, T> {
}
}
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i1.advance_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
- mem::swap(&mut self.i1, &mut self.i2);
- self.i1.advance_by(n - m).map_err(|o| o + m)
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let remaining = self.i1.advance_by(n);
+ match remaining {
+ Ok(()) => return Ok(()),
+ Err(n) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(n.get())
+ }
+ }
}
#[inline]
@@ -125,14 +128,14 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
}
}
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i2.advance_back_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
-
- mem::swap(&mut self.i1, &mut self.i2);
- self.i2.advance_back_by(n - m).map_err(|o| m + o)
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ match self.i2.advance_back_by(n) {
+ Ok(()) => return Ok(()),
+ Err(n) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(n.get())
+ }
+ }
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
index 2c59d95cd..7defbb109 100644
--- a/library/alloc/src/collections/vec_deque/iter_mut.rs
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -1,4 +1,5 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::num::NonZeroUsize;
use core::ops::Try;
use core::{fmt, mem, slice};
@@ -47,13 +48,14 @@ impl<'a, T> Iterator for IterMut<'a, T> {
}
}
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i1.advance_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
- mem::swap(&mut self.i1, &mut self.i2);
- self.i1.advance_by(n - m).map_err(|o| o + m)
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ match self.i1.advance_by(n) {
+ Ok(()) => return Ok(()),
+ Err(remaining) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(remaining.get())
+ }
+ }
}
#[inline]
@@ -117,14 +119,14 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
}
}
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i2.advance_back_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
-
- mem::swap(&mut self.i1, &mut self.i2);
- self.i2.advance_back_by(n - m).map_err(|o| m + o)
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ match self.i2.advance_back_by(n) {
+ Ok(()) => return Ok(()),
+ Err(remaining) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(remaining.get())
+ }
+ }
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 8317ac431..8916b42ed 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -10,7 +10,7 @@
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::iter::{repeat_n, repeat_with, ByRefSized, FromIterator};
+use core::iter::{repeat_n, repeat_with, ByRefSized};
use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr;
@@ -1156,7 +1156,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
- let (a_range, b_range) = self.slice_ranges(..);
+ let (a_range, b_range) = self.slice_ranges(.., self.len);
// SAFETY: `slice_ranges` always returns valid ranges into
// the physical buffer.
unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
@@ -1190,7 +1190,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
- let (a_range, b_range) = self.slice_ranges(..);
+ let (a_range, b_range) = self.slice_ranges(.., self.len);
// SAFETY: `slice_ranges` always returns valid ranges into
// the physical buffer.
unsafe { (&mut *self.buffer_range(a_range), &mut *self.buffer_range(b_range)) }
@@ -1232,19 +1232,28 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Given a range into the logical buffer of the deque, this function
/// returns two ranges into the physical buffer that correspond to
- /// the given range.
- fn slice_ranges<R>(&self, range: R) -> (Range<usize>, Range<usize>)
+ /// the given range. The `len` parameter should usually just be `self.len`;
+ /// the reason it's passed explicitly is that if the deque is wrapped in
+ /// a `Drain`, then `self.len` is not actually the length of the deque.
+ ///
+ /// # Safety
+ ///
+ /// This function is always safe to call. For the resulting ranges to be valid
+ /// ranges into the physical buffer, the caller must ensure that the result of
+ /// calling `slice::range(range, ..len)` represents a valid range into the
+ /// logical buffer, and that all elements in that range are initialized.
+ fn slice_ranges<R>(&self, range: R, len: usize) -> (Range<usize>, Range<usize>)
where
R: RangeBounds<usize>,
{
- let Range { start, end } = slice::range(range, ..self.len);
+ let Range { start, end } = slice::range(range, ..len);
let len = end - start;
if len == 0 {
(0..0, 0..0)
} else {
- // `slice::range` guarantees that `start <= end <= self.len`.
- // because `len != 0`, we know that `start < end`, so `start < self.len`
+ // `slice::range` guarantees that `start <= end <= len`.
+ // because `len != 0`, we know that `start < end`, so `start < len`
// and the indexing is valid.
let wrapped_start = self.to_physical_idx(start);
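Editor's note: to see why `slice_ranges` returns *two* ranges, here is a simplified, hypothetical model of the ring-buffer mapping. It is not the private API; the real code goes through `to_physical_idx` and `buffer_range`, but the wrap-around arithmetic is the same idea:

```rust
use std::ops::Range;

/// Hypothetical stand-in: maps a logical range [start, end) in a ring
/// buffer with `head` offset and `capacity` slots onto one or two
/// contiguous physical ranges.
fn split_ranges(
    head: usize,
    capacity: usize,
    start: usize,
    end: usize,
) -> (Range<usize>, Range<usize>) {
    if start == end {
        return (0..0, 0..0);
    }
    let len = end - start;
    let phys_start = (head + start) % capacity; // to_physical_idx(start)
    if phys_start + len <= capacity {
        (phys_start..phys_start + len, 0..0) // contiguous, no wrap
    } else {
        let first = capacity - phys_start; // elements before the wrap point
        (phys_start..capacity, 0..len - first) // remainder wraps to the front
    }
}

fn main() {
    // head = 6 in an 8-slot buffer: logical 0..4 wraps around the end.
    assert_eq!(split_ranges(6, 8, 0, 4), (6..8, 0..2));
}
```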
@@ -1290,7 +1299,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (a_range, b_range) = self.slice_ranges(range);
+ let (a_range, b_range) = self.slice_ranges(range, self.len);
// SAFETY: The ranges returned by `slice_ranges`
// are valid ranges into the physical buffer, so
// it's ok to pass them to `buffer_range` and
@@ -1330,7 +1339,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (a_range, b_range) = self.slice_ranges(range);
+ let (a_range, b_range) = self.slice_ranges(range, self.len);
// SAFETY: The ranges returned by `slice_ranges`
// are valid ranges into the physical buffer, so
// it's ok to pass them to `buffer_range` and
@@ -2385,7 +2394,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Binary searches this `VecDeque` for a given element.
- /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ /// If the `VecDeque` is not sorted, the returned result is unspecified and
+ /// meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2395,7 +2405,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
- /// [`contains`]: VecDeque::contains
/// [`binary_search_by`]: VecDeque::binary_search_by
/// [`binary_search_by_key`]: VecDeque::binary_search_by_key
/// [`partition_point`]: VecDeque::partition_point
@@ -2441,12 +2450,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Binary searches this `VecDeque` with a comparator function.
- /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
///
- /// The comparator function should implement an order consistent
- /// with the sort order of the deque, returning an order code that
- /// indicates whether its argument is `Less`, `Equal` or `Greater`
- /// than the desired target.
+ /// The comparator function should return an order code that indicates
+    /// whether its argument is `Less`, `Equal` or `Greater` than the desired
+    /// target.
+ /// If the `VecDeque` is not sorted or if the comparator function does not
+ /// implement an order consistent with the sort order of the underlying
+ /// `VecDeque`, the returned result is unspecified and meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2456,7 +2466,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
- /// [`contains`]: VecDeque::contains
/// [`binary_search`]: VecDeque::binary_search
/// [`binary_search_by_key`]: VecDeque::binary_search_by_key
/// [`partition_point`]: VecDeque::partition_point
@@ -2496,10 +2505,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Binary searches this `VecDeque` with a key extraction function.
- /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
///
/// Assumes that the deque is sorted by the key, for instance with
/// [`make_contiguous().sort_by_key()`] using the same key extraction function.
+ /// If the deque is not sorted by the key, the returned result is
+ /// unspecified and meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2509,7 +2519,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
- /// [`contains`]: VecDeque::contains
/// [`make_contiguous().sort_by_key()`]: VecDeque::make_contiguous
/// [`binary_search`]: VecDeque::binary_search
/// [`binary_search_by`]: VecDeque::binary_search_by
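Editor's note: the reworded docs make the precondition explicit: the deque must already be sorted, otherwise the result is unspecified. For reference, the expected behavior on a sorted deque (values chosen in the style of the std doctests):

```rust
use std::collections::VecDeque;

fn main() {
    let deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();

    assert_eq!(deque.binary_search(&13), Ok(9)); // found at index 9
    assert_eq!(deque.binary_search(&4), Err(7)); // Err holds the insertion point
    // On an unsorted deque the result would be meaningless; sort first,
    // e.g. with deque.make_contiguous().sort().
}
```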
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index e9cc3875f..aa240c37e 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -87,9 +87,14 @@
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
-#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))]
+#![warn(multiple_supertrait_upcastable)]
//
// Library features:
+// tidy-alphabetical-start
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(is_sorted))]
+#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
@@ -99,23 +104,21 @@
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_align_of_val)]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
-#![feature(const_cow_is_borrowed)]
#![feature(const_convert)]
-#![feature(const_size_of_val)]
-#![feature(const_align_of_val)]
-#![feature(const_ptr_read)]
-#![feature(const_maybe_uninit_zeroed)]
-#![feature(const_maybe_uninit_write)]
+#![feature(const_cow_is_borrowed)]
+#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_zeroed)]
+#![feature(const_pin)]
+#![feature(const_ptr_read)]
#![feature(const_refs_to_cell)]
+#![feature(const_size_of_val)]
+#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(core_panic)]
-#![feature(const_eval_select)]
-#![feature(const_pin)]
-#![feature(const_waker)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
@@ -126,7 +129,6 @@
#![feature(hasher_prefixfree_extras)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
-#![cfg_attr(test, feature(is_sorted))]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
@@ -134,8 +136,6 @@
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
-#![cfg_attr(test, feature(new_uninit))]
-#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
#![feature(provide_any)]
@@ -151,6 +151,7 @@
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
+#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)]
@@ -161,41 +162,43 @@
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(utf8_chunks)]
-#![feature(std_internals)]
+// tidy-alphabetical-end
//
// Language features:
+// tidy-alphabetical-start
+#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(test, feature(panic_update_hook))]
+#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
+#![feature(c_unwind)]
#![feature(cfg_sanitize)]
#![feature(const_deref)]
#![feature(const_mut_refs)]
-#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
+#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
-#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(min_specialization)]
+#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
+#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
-#![feature(pointer_is_aligned)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
-#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
-#![feature(c_unwind)]
#![feature(with_negative_coherence)]
-#![cfg_attr(test, feature(panic_update_hook))]
-#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))]
+// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 3751f2a24..dfd30d99c 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -4,7 +4,6 @@ use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 932a537c5..ba035fb06 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -15,7 +15,7 @@
//!
//! [`Rc`] uses non-atomic reference counting. This means that overhead is very
//! low, but an [`Rc`] cannot be sent between threads, and consequently [`Rc`]
-//! does not implement [`Send`][send]. As a result, the Rust compiler
+//! does not implement [`Send`]. As a result, the Rust compiler
//! will check *at compile time* that you are not sending [`Rc`]s between
//! threads. If you need multi-threaded, atomic reference counting, use
//! [`sync::Arc`][arc].
@@ -232,7 +232,6 @@
//! [clone]: Clone::clone
//! [`Cell`]: core::cell::Cell
//! [`RefCell`]: core::cell::RefCell
-//! [send]: core::marker::Send
//! [arc]: crate::sync::Arc
//! [`Deref`]: core::ops::Deref
//! [downgrade]: Rc::downgrade
@@ -251,13 +250,12 @@ use core::any::Any;
use core::borrow;
use core::cell::Cell;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
-use core::marker::{self, PhantomData, Unpin, Unsize};
+use core::marker::{PhantomData, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw, forget};
@@ -321,7 +319,7 @@ pub struct Rc<T: ?Sized> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> !marker::Send for Rc<T> {}
+impl<T: ?Sized> !Send for Rc<T> {}
// Note that this negative impl isn't strictly necessary for correctness,
// as `Rc` transitively contains a `Cell`, which is itself `!Sync`.
@@ -329,7 +327,7 @@ impl<T: ?Sized> !marker::Send for Rc<T> {}
// having an explicit negative impl is nice for documentation purposes
// and results in nicer error messages.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> !marker::Sync for Rc<T> {}
+impl<T: ?Sized> !Sync for Rc<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
@@ -681,6 +679,24 @@ impl<T> Rc<T> {
Err(this)
}
}
+
+ /// Returns the inner value, if the `Rc` has exactly one strong reference.
+ ///
+ /// Otherwise, [`None`] is returned and the `Rc` is dropped.
+ ///
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// If `Rc::into_inner` is called on every clone of this `Rc`,
+ /// it is guaranteed that exactly one of the calls returns the inner value.
+ /// This means in particular that the inner value is not dropped.
+ ///
+ /// This is equivalent to `Rc::try_unwrap(this).ok()`. (Note that these are not equivalent for
+ /// [`Arc`](crate::sync::Arc), due to race conditions that do not apply to `Rc`.)
+ #[inline]
+ #[stable(feature = "rc_into_inner", since = "1.70.0")]
+ pub fn into_inner(this: Self) -> Option<T> {
+ Rc::try_unwrap(this).ok()
+ }
}
impl<T> Rc<[T]> {
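Editor's note: beyond the stated equivalence with `Rc::try_unwrap(this).ok()`, the new docs promise that if `into_inner` is called on every clone, exactly one call returns the value. A small sketch of that guarantee, assuming a Rust 1.70+ toolchain where `rc_into_inner` has landed per this diff:

```rust
use std::rc::Rc;

fn main() {
    let rc = Rc::new(String::from("unique"));
    let clones: Vec<_> = (0..3).map(|_| Rc::clone(&rc)).collect();
    drop(rc);

    // Each call either returns the value (last clone standing) or None;
    // exactly one call gets the String, so it is never silently dropped.
    let winners: Vec<String> =
        clones.into_iter().filter_map(Rc::into_inner).collect();
    assert_eq!(winners, ["unique"]);
}
```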
@@ -1042,7 +1058,7 @@ impl<T: ?Sized> Rc<T> {
#[inline]
#[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
pub unsafe fn decrement_strong_count(ptr: *const T) {
- unsafe { mem::drop(Rc::from_raw(ptr)) };
+ unsafe { drop(Rc::from_raw(ptr)) };
}
/// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
@@ -1478,7 +1494,7 @@ impl<T> Rc<[T]> {
///
/// Behavior is undefined should the size be wrong.
#[cfg(not(no_global_oom_handling))]
- unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
+ unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Rc<[T]> {
// Panic guard while cloning T elements.
// In the event of a panic, elements that have been written
// into the new RcBox will be dropped, then the memory freed.
@@ -1720,11 +1736,11 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// Inequality for two `Rc`s.
///
- /// Two `Rc`s are unequal if their inner values are unequal.
+ /// Two `Rc`s are not equal if their inner values are not equal.
///
/// If `T` also implements `Eq` (implying reflexivity of equality),
/// two `Rc`s that point to the same allocation are
- /// never unequal.
+ /// always equal.
///
/// # Examples
///
@@ -2070,7 +2086,7 @@ impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
-impl<T> iter::FromIterator<T> for Rc<[T]> {
+impl<T> FromIterator<T> for Rc<[T]> {
/// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
///
/// # Performance characteristics
@@ -2109,7 +2125,7 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
/// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
- fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
ToRcSlice::to_rc_slice(iter.into_iter())
}
}
@@ -2186,9 +2202,9 @@ pub struct Weak<T: ?Sized> {
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> !marker::Send for Weak<T> {}
+impl<T: ?Sized> !Send for Weak<T> {}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> !marker::Sync for Weak<T> {}
+impl<T: ?Sized> !Sync for Weak<T> {}
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
diff --git a/library/alloc/src/rc/tests.rs b/library/alloc/src/rc/tests.rs
index 32433cfbd..2784108e0 100644
--- a/library/alloc/src/rc/tests.rs
+++ b/library/alloc/src/rc/tests.rs
@@ -152,6 +152,21 @@ fn try_unwrap() {
}
#[test]
+fn into_inner() {
+ let x = Rc::new(3);
+ assert_eq!(Rc::into_inner(x), Some(3));
+
+ let x = Rc::new(4);
+ let y = Rc::clone(&x);
+ assert_eq!(Rc::into_inner(x), None);
+ assert_eq!(Rc::into_inner(y), Some(4));
+
+ let x = Rc::new(5);
+ let _w = Rc::downgrade(&x);
+ assert_eq!(Rc::into_inner(x), Some(5));
+}
+
+#[test]
fn into_from_raw() {
let x = Rc::new(Box::new("hello"));
let y = x.clone();
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index afbe5cfaf..b87ef59f6 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -256,7 +256,7 @@ impl str {
/// assert_eq!("than an old", s.replace("is", "an"));
/// ```
///
- /// When the pattern doesn't match:
+ /// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
@@ -297,7 +297,7 @@ impl str {
/// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
/// ```
///
- /// When the pattern doesn't match:
+ /// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index c7e7ed3e9..be41919b9 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -45,9 +45,9 @@
use core::error::Error;
use core::fmt;
use core::hash;
-use core::iter::FusedIterator;
#[cfg(not(no_global_oom_handling))]
-use core::iter::{from_fn, FromIterator};
+use core::iter::from_fn;
+use core::iter::FusedIterator;
#[cfg(not(no_global_oom_handling))]
use core::ops::Add;
#[cfg(not(no_global_oom_handling))]
@@ -359,7 +359,7 @@ use crate::vec::Vec;
/// [Deref]: core::ops::Deref "ops::Deref"
/// [`Deref`]: core::ops::Deref "ops::Deref"
/// [`as_str()`]: String::as_str
-#[derive(PartialOrd, Eq, Ord)]
+#[derive(PartialEq, PartialOrd, Eq, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), lang = "String")]
pub struct String {
@@ -2207,14 +2207,6 @@ impl<'a, 'b> Pattern<'a> for &'b String {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl PartialEq for String {
- #[inline]
- fn eq(&self, other: &String) -> bool {
- PartialEq::eq(&self[..], &other[..])
- }
-}
-
macro_rules! impl_eq {
($lhs:ty, $rhs: ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index fdd341a06..24849d52d 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -11,14 +11,13 @@
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::hint;
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
-use core::marker::{PhantomData, Unpin, Unsize};
+use core::marker::{PhantomData, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw};
@@ -51,8 +50,16 @@ mod tests;
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
+/// Trying to go above it might cause a `panic` (without the count actually going above it).
+///
+/// This is a global invariant, and also applies when using a compare-exchange loop.
+///
+/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+/// The error message given when either counter would go above `MAX_REFCOUNT`, in code paths where we can `panic` safely.
+const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
+
#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
($x:expr) => {
@@ -180,8 +187,6 @@ macro_rules! acquire {
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
-/// [`Send`]: core::marker::Send
-/// [`Sync`]: core::marker::Sync
/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
@@ -654,20 +659,17 @@ impl<T> Arc<T> {
///
/// This will succeed even if there are outstanding weak references.
///
- // FIXME: when `Arc::into_inner` is stabilized, add this paragraph:
- /*
/// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
/// want to keep the `Arc` in the [`Err`] case.
/// Immediately dropping the [`Err`] payload, like in the expression
/// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
/// drop to zero and the inner value of the `Arc` to be dropped:
- /// For instance if two threads execute this expression in parallel, then
+    /// For instance, if two threads each execute this expression in parallel, then
/// there is a race condition. The threads could first both check whether they
/// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
/// both drop their `Arc` in the call to [`ok`][`Result::ok`],
/// taking the strong count from two down to zero.
///
- */
/// # Examples
///
/// ```
@@ -711,20 +713,13 @@ impl<T> Arc<T> {
/// This means in particular that the inner value is not dropped.
///
/// The similar expression `Arc::try_unwrap(this).ok()` does not
- /// offer such a guarantee. See the last example below.
- //
- // FIXME: when `Arc::into_inner` is stabilized, add this to end
- // of the previous sentence:
- /*
+ /// offer such a guarantee. See the last example below
/// and the documentation of [`Arc::try_unwrap`].
- */
///
/// # Examples
///
/// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
/// ```
- /// #![feature(arc_into_inner)]
- ///
/// use std::sync::Arc;
///
/// let x = Arc::new(3);
@@ -748,8 +743,6 @@ impl<T> Arc<T> {
///
/// A more practical example demonstrating the need for `Arc::into_inner`:
/// ```
- /// #![feature(arc_into_inner)]
- ///
/// use std::sync::Arc;
///
/// // Definition of a simple singly linked list using `Arc`:
@@ -799,13 +792,8 @@ impl<T> Arc<T> {
/// x_thread.join().unwrap();
/// y_thread.join().unwrap();
/// ```
-
- // FIXME: when `Arc::into_inner` is stabilized, adjust above documentation
- // and the documentation of `Arc::try_unwrap` according to the `FIXME`s. Also
- // open an issue on rust-lang/rust-clippy, asking for a lint against
- // `Arc::try_unwrap(...).ok()`.
#[inline]
- #[unstable(feature = "arc_into_inner", issue = "106894")]
+ #[stable(feature = "arc_into_inner", since = "1.70.0")]
pub fn into_inner(this: Self) -> Option<T> {
// Make sure that the ordinary `Drop` implementation isn’t called as well
let mut this = mem::ManuallyDrop::new(this);
@@ -1104,6 +1092,9 @@ impl<T: ?Sized> Arc<T> {
continue;
}
+ // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
+ assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
+
// NOTE: this code currently ignores the possibility of overflow
// into usize::MAX; in general both Rc and Arc need to be adjusted
// to deal with overflow.
@@ -1247,7 +1238,7 @@ impl<T: ?Sized> Arc<T> {
#[inline]
#[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
pub unsafe fn decrement_strong_count(ptr: *const T) {
- unsafe { mem::drop(Arc::from_raw(ptr)) };
+ unsafe { drop(Arc::from_raw(ptr)) };
}
#[inline]
@@ -1410,7 +1401,7 @@ impl<T> Arc<[T]> {
///
/// Behavior is undefined should the size be wrong.
#[cfg(not(no_global_oom_handling))]
- unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
+ unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
// Panic guard while cloning T elements.
// In the event of a panic, elements that have been written
// into the new ArcInner will be dropped, then the memory freed.
@@ -1519,6 +1510,11 @@ impl<T: ?Sized> Clone for Arc<T> {
// the worst already happened and we actually do overflow the `usize` counter. However, that
// requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
// above and the `abort` below, which seems exceedingly unlikely.
+ //
+ // This is a global invariant, and also applies when using a compare-exchange loop to increment
+ // counters in other methods.
+ // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
+ // and then overflow using a few `fetch_add`s.
if old_size > MAX_REFCOUNT {
abort();
}
@@ -2180,9 +2176,7 @@ impl<T: ?Sized> Weak<T> {
return None;
}
// See comments in `Arc::clone` for why we do this (for `mem::forget`).
- if n > MAX_REFCOUNT {
- abort();
- }
+ assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
Some(n + 1)
})
.ok()
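Editor's note: this hunk and the extended `Arc::clone` comment encode the same global invariant: a compare-exchange loop must enforce the cap on every attempt, or it could park the counter just below `MAX_REFCOUNT` and let a later `fetch_add` overflow it. A simplified model of the checked increment in `Weak::upgrade`; the real code uses `Acquire`/`Relaxed` orderings, `Relaxed` here just keeps the sketch minimal:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

const MAX_REFCOUNT: usize = isize::MAX as usize;

/// Sketch: a CAS loop can assert (and panic) safely because it never
/// stores a value above the cap, unlike a `fetch_add`, which publishes
/// the bumped count first and must `abort` after the fact.
fn try_increment(strong: &AtomicUsize) -> Option<usize> {
    strong
        .fetch_update(Relaxed, Relaxed, |n| {
            if n == 0 {
                return None; // value already dropped, upgrade fails
            }
            assert!(n <= MAX_REFCOUNT, "Arc counter overflow");
            Some(n + 1)
        })
        .ok()
        .map(|old| old + 1)
}
```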
@@ -2461,10 +2455,10 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
/// Inequality for two `Arc`s.
///
- /// Two `Arc`s are unequal if their inner values are unequal.
+ /// Two `Arc`s are not equal if their inner values are not equal.
///
/// If `T` also implements `Eq` (implying reflexivity of equality),
- /// two `Arc`s that point to the same value are never unequal.
+ /// two `Arc`s that point to the same value are always equal.
///
/// # Examples
///
@@ -2821,7 +2815,7 @@ impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
-impl<T> iter::FromIterator<T> for Arc<[T]> {
+impl<T> FromIterator<T> for Arc<[T]> {
/// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
///
/// # Performance characteristics
@@ -2860,7 +2854,7 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
/// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
- fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
ToArcSlice::to_arc_slice(iter.into_iter())
}
}
diff --git a/library/alloc/src/tests.rs b/library/alloc/src/tests.rs
index 299ed156a..b1d3a9fa8 100644
--- a/library/alloc/src/tests.rs
+++ b/library/alloc/src/tests.rs
@@ -4,7 +4,6 @@ use core::any::Any;
use core::clone::Clone;
use core::convert::TryInto;
use core::ops::Deref;
-use core::result::Result::{Err, Ok};
use std::boxed::Box;
@@ -15,7 +14,7 @@ fn test_owned_clone() {
assert!(a == b);
}
-#[derive(PartialEq, Eq)]
+#[derive(Debug, PartialEq, Eq)]
struct Test;
#[test]
@@ -23,24 +22,17 @@ fn any_move() {
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
- match a.downcast::<i32>() {
- Ok(a) => {
- assert!(a == Box::new(8));
- }
- Err(..) => panic!(),
- }
- match b.downcast::<Test>() {
- Ok(a) => {
- assert!(a == Box::new(Test));
- }
- Err(..) => panic!(),
- }
+ let a: Box<i32> = a.downcast::<i32>().unwrap();
+ assert_eq!(*a, 8);
+
+ let b: Box<Test> = b.downcast::<Test>().unwrap();
+ assert_eq!(*b, Test);
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
- assert!(a.downcast::<Box<Test>>().is_err());
- assert!(b.downcast::<Box<i32>>().is_err());
+ assert!(a.downcast::<Box<i32>>().is_err());
+ assert!(b.downcast::<Box<Test>>().is_err());
}
#[test]
diff --git a/library/alloc/src/vec/cow.rs b/library/alloc/src/vec/cow.rs
index 64943a273..2c799605b 100644
--- a/library/alloc/src/vec/cow.rs
+++ b/library/alloc/src/vec/cow.rs
@@ -1,5 +1,4 @@
use crate::borrow::Cow;
-use core::iter::FromIterator;
use super::Vec;
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 37966007e..b2db2fdfd 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -11,6 +11,7 @@ use core::iter::{
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::num::NonZeroUsize;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -107,7 +108,7 @@ impl<T, A: Allocator> IntoIter<T, A> {
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
/// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
- /// (&mut into_iter).for_each(core::mem::drop);
+ /// (&mut into_iter).for_each(drop);
/// std::mem::forget(into_iter);
/// ```
///
@@ -213,7 +214,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if T::IS_ZST {
@@ -227,10 +228,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
@@ -313,7 +311,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
if T::IS_ZST {
// SAFETY: same as for advance_by()
@@ -327,10 +325,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
}
@@ -347,6 +342,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default,
+{
+ /// Creates an empty `vec::IntoIter`.
+ ///
+ /// ```
+ /// # use std::vec;
+ /// let iter: vec::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// assert_eq!(iter.as_slice(), &[]);
+ /// ```
+ fn default() -> Self {
+ super::Vec::new_in(Default::default()).into_iter()
+ }
+}
+
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
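Editor's note: one practical payoff of the new `Default` impl is that generic code can detach a `vec::IntoIter` from a struct with `mem::take`, leaving a valid empty iterator behind. The `Tokens` holder below is hypothetical, and the snippet assumes a Rust 1.70+ toolchain where `default_iters` has landed per this diff:

```rust
use std::mem;
use std::vec;

struct Tokens {
    iter: vec::IntoIter<String>,
}

impl Tokens {
    fn take_rest(&mut self) -> Vec<String> {
        // mem::take swaps in Default::default(), i.e. an empty IntoIter,
        // so self stays usable after the call.
        mem::take(&mut self.iter).collect()
    }
}

fn main() {
    let mut tokens =
        Tokens { iter: vec!["a".to_string(), "b".to_string()].into_iter() };
    assert_eq!(tokens.take_rest(), ["a", "b"]);
    assert_eq!(tokens.iter.len(), 0); // left empty, not invalid
}
```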
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index f2aa30f18..3736a6e0b 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -56,13 +56,9 @@
#[cfg(not(no_global_oom_handling))]
use core::cmp;
use core::cmp::Ordering;
-use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::assume;
use core::iter;
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
@@ -1240,11 +1236,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
// `deref`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
@@ -1277,11 +1269,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
// `deref_mut`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns a reference to the underlying allocator.
@@ -2999,7 +2987,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
}
}
-/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[inline]
@@ -3011,7 +2999,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
-/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
#[inline]
diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs
index af49826ff..68ebd8e35 100644
--- a/library/alloc/tests/boxed.rs
+++ b/library/alloc/tests/boxed.rs
@@ -179,18 +179,3 @@ unsafe impl const Allocator for ConstAllocator {
self
}
}
-
-#[test]
-fn const_box() {
- const VALUE: u32 = {
- let mut boxed = Box::new_in(1u32, ConstAllocator);
- assert!(*boxed == 1);
-
- *boxed = 42;
- assert!(*boxed == 42);
-
- *Box::leak(boxed)
- };
-
- assert!(VALUE == 42);
-}
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index 2a93a242d..0667cd7bc 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -3,12 +3,11 @@
#![feature(assert_matches)]
#![feature(btree_drain_filter)]
#![feature(cow_is_borrowed)]
-#![feature(const_box)]
#![feature(const_convert)]
#![feature(const_cow_is_borrowed)]
#![feature(const_heap)]
#![feature(const_mut_refs)]
-#![feature(const_nonnull_slice_from_raw_parts)]
+#![feature(const_slice_from_raw_parts_mut)]
#![feature(const_ptr_write)]
#![feature(const_try)]
#![feature(core_intrinsics)]
@@ -25,7 +24,6 @@
#![feature(binary_heap_into_iter_sorted)]
#![feature(binary_heap_drain_sorted)]
#![feature(slice_ptr_get)]
-#![feature(binary_heap_retain)]
#![feature(binary_heap_as_slice)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
@@ -38,13 +36,11 @@
#![feature(const_default_impls)]
#![feature(const_trait_impl)]
#![feature(const_str_from_utf8)]
-#![feature(nonnull_slice_from_raw_parts)]
#![feature(panic_update_hook)]
#![feature(pointer_is_aligned)]
#![feature(slice_flatten)]
#![feature(thin_box)]
#![feature(strict_provenance)]
-#![feature(once_cell)]
#![feature(drain_keep_rest)]
#![deny(fuzzy_provenance_casts)]
#![deny(unsafe_op_in_unsafe_fn)]
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index 4d182be02..c1dbbde08 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1499,13 +1499,25 @@ fn test_split_whitespace() {
#[test]
fn test_lines() {
- let data = "\nMäry häd ä little lämb\n\r\nLittle lämb\n";
- let lines: Vec<&str> = data.lines().collect();
- assert_eq!(lines, ["", "Märy häd ä little lämb", "", "Little lämb"]);
-
- let data = "\r\nMäry häd ä little lämb\n\nLittle lämb"; // no trailing \n
- let lines: Vec<&str> = data.lines().collect();
- assert_eq!(lines, ["", "Märy häd ä little lämb", "", "Little lämb"]);
+ fn t(data: &str, expected: &[&str]) {
+ let lines: Vec<&str> = data.lines().collect();
+ assert_eq!(lines, expected);
+ }
+ t("", &[]);
+ t("\n", &[""]);
+ t("\n2nd", &["", "2nd"]);
+ t("\r\n", &[""]);
+ t("bare\r", &["bare\r"]);
+ t("bare\rcr", &["bare\rcr"]);
+ t("Text\n\r", &["Text", "\r"]);
+ t(
+ "\nMäry häd ä little lämb\n\r\nLittle lämb\n",
+ &["", "Märy häd ä little lämb", "", "Little lämb"],
+ );
+ t(
+ "\r\nMäry häd ä little lämb\n\nLittle lämb",
+ &["", "Märy häd ä little lämb", "", "Little lämb"],
+ );
}
#[test]
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index 2f07c2911..3ee16f04e 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -1,5 +1,7 @@
use core::alloc::{Allocator, Layout};
+use core::assert_eq;
use core::iter::IntoIterator;
+use core::num::NonZeroUsize;
use core::ptr::NonNull;
use std::alloc::System;
use std::assert_matches::assert_matches;
@@ -1062,21 +1064,21 @@ fn test_into_iter_leak() {
#[test]
fn test_into_iter_advance_by() {
- let mut i = [1, 2, 3, 4, 5].into_iter();
- i.advance_by(0).unwrap();
- i.advance_back_by(0).unwrap();
+ let mut i = vec![1, 2, 3, 4, 5].into_iter();
+ assert_eq!(i.advance_by(0), Ok(()));
+ assert_eq!(i.advance_back_by(0), Ok(()));
assert_eq!(i.as_slice(), [1, 2, 3, 4, 5]);
- i.advance_by(1).unwrap();
- i.advance_back_by(1).unwrap();
+ assert_eq!(i.advance_by(1), Ok(()));
+ assert_eq!(i.advance_back_by(1), Ok(()));
assert_eq!(i.as_slice(), [2, 3, 4]);
- assert_eq!(i.advance_back_by(usize::MAX), Err(3));
+ assert_eq!(i.advance_back_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX - 3).unwrap()));
- assert_eq!(i.advance_by(usize::MAX), Err(0));
+ assert_eq!(i.advance_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX).unwrap()));
- i.advance_by(0).unwrap();
- i.advance_back_by(0).unwrap();
+ assert_eq!(i.advance_by(0), Ok(()));
+ assert_eq!(i.advance_back_by(0), Ok(()));
assert_eq!(i.len(), 0);
}
@@ -1124,7 +1126,7 @@ fn test_into_iter_zst() {
for _ in vec![C; 5].into_iter().rev() {}
let mut it = vec![C, C].into_iter();
- it.advance_by(1).unwrap();
+ assert_eq!(it.advance_by(1), Ok(()));
drop(it);
let mut it = vec![C, C].into_iter();
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index 5a0b852e8..ddc27e34e 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -1,3 +1,4 @@
+use core::num::NonZeroUsize;
use std::assert_matches::assert_matches;
use std::collections::TryReserveErrorKind::*;
use std::collections::{vec_deque::Drain, VecDeque};
@@ -426,6 +427,28 @@ fn test_into_iter() {
assert_eq!(it.next(), Some(7));
assert_eq!(it.size_hint(), (5, Some(5)));
}
+
+ // advance_by
+ {
+ let mut d = VecDeque::new();
+ for i in 0..=4 {
+ d.push_back(i);
+ }
+ for i in 6..=8 {
+ d.push_front(i);
+ }
+
+ let mut it = d.into_iter();
+ assert_eq!(it.advance_by(1), Ok(()));
+ assert_eq!(it.next(), Some(7));
+ assert_eq!(it.advance_back_by(1), Ok(()));
+ assert_eq!(it.next_back(), Some(3));
+
+ let mut it = VecDeque::from(vec![1, 2, 3, 4, 5]).into_iter();
+ assert_eq!(it.advance_by(10), Err(NonZeroUsize::new(5).unwrap()));
+ let mut it = VecDeque::from(vec![1, 2, 3, 4, 5]).into_iter();
+ assert_eq!(it.advance_back_by(10), Err(NonZeroUsize::new(5).unwrap()));
+ }
}
#[test]
diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs
index e4100120d..74ef0949b 100644
--- a/library/core/benches/lib.rs
+++ b/library/core/benches/lib.rs
@@ -20,6 +20,7 @@ mod ops;
mod pattern;
mod slice;
mod str;
+mod tuple;
/// Returns a `rand::Rng` seeded with a consistent seed.
///
diff --git a/library/core/benches/tuple.rs b/library/core/benches/tuple.rs
new file mode 100644
index 000000000..d9ff9d0dd
--- /dev/null
+++ b/library/core/benches/tuple.rs
@@ -0,0 +1,22 @@
+use rand::prelude::*;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_tuple_comparison(b: &mut Bencher) {
+ let mut rng = black_box(super::bench_rng());
+
+ let data = black_box([
+ ("core::iter::adapters::Chain", 123_usize),
+ ("core::iter::adapters::Clone", 456_usize),
+ ("core::iter::adapters::Copie", 789_usize),
+ ("core::iter::adapters::Cycle", 123_usize),
+ ("core::iter::adapters::Flatt", 456_usize),
+ ("core::iter::adapters::TakeN", 789_usize),
+ ]);
+
+ b.iter(|| {
+ let x = data.choose(&mut rng).unwrap();
+ let y = data.choose(&mut rng).unwrap();
+ [x < y, x <= y, x > y, x >= y]
+ });
+}
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
index a6082455f..ff390322d 100644
--- a/library/core/src/alloc/mod.rs
+++ b/library/core/src/alloc/mod.rs
@@ -95,10 +95,10 @@ impl fmt::Display for AllocError {
/// # Safety
///
/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
-/// until the instance and all of its clones are dropped,
+/// until the instance and all of its copies and clones are dropped,
///
-/// * cloning or moving the allocator must not invalidate memory blocks returned from this
-/// allocator. A cloned allocator must behave like the same allocator, and
+/// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this
+/// allocator. A copied or cloned allocator must behave like the same allocator, and
///
/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
/// method of the allocator.
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index 8259c087d..73e2c2cfb 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -1,5 +1,6 @@
//! Defines the `IntoIter` owned iterator for arrays.
+use crate::num::NonZeroUsize;
use crate::{
fmt,
iter::{self, ExactSizeIterator, FusedIterator, TrustedLen},
@@ -284,12 +285,11 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> {
self.next_back()
}
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let original_len = self.len();
-
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
// This also moves the start, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_prefix(n);
+ let remaining = n - range_to_drop.len();
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
@@ -297,7 +297,7 @@ impl<T, const N: usize> Iterator for IntoIter<T, N> {
ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
}
- if n > original_len { Err(original_len) } else { Ok(()) }
+ NonZeroUsize::new(remaining).map_or(Ok(()), Err)
}
}
@@ -334,12 +334,11 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
})
}
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let original_len = self.len();
-
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
// This also moves the end, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_suffix(n);
+ let remaining = n - range_to_drop.len();
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
@@ -347,7 +346,7 @@ impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(slice));
}
- if n > original_len { Err(original_len) } else { Ok(()) }
+ NonZeroUsize::new(remaining).map_or(Ok(()), Err)
}
}
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 897d03595..33d928e23 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -11,36 +11,77 @@
//! mutate it.
//!
//! Shareable mutable containers exist to permit mutability in a controlled manner, even in the
-//! presence of aliasing. Both [`Cell<T>`] and [`RefCell<T>`] allow doing this in a single-threaded
-//! way. However, neither `Cell<T>` nor `RefCell<T>` are thread safe (they do not implement
-//! [`Sync`]). If you need to do aliasing and mutation between multiple threads it is possible to
-//! use [`Mutex<T>`], [`RwLock<T>`] or [`atomic`] types.
+//! presence of aliasing. [`Cell<T>`], [`RefCell<T>`], and [`OnceCell<T>`] allow doing this in
+//! a single-threaded way—they do not implement [`Sync`]. (If you need to do aliasing and
+//! mutation among multiple threads, [`Mutex<T>`], [`RwLock<T>`], [`OnceLock<T>`] or [`atomic`]
+//! types are the correct data structures to use.)
//!
-//! Values of the `Cell<T>` and `RefCell<T>` types may be mutated through shared references (i.e.
-//! the common `&T` type), whereas most Rust types can only be mutated through unique (`&mut T`)
-//! references. We say that `Cell<T>` and `RefCell<T>` provide 'interior mutability', in contrast
-//! with typical Rust types that exhibit 'inherited mutability'.
+//! Values of the `Cell<T>`, `RefCell<T>`, and `OnceCell<T>` types may be mutated through shared
+//! references (i.e. the common `&T` type), whereas most Rust types can only be mutated through
+//! unique (`&mut T`) references. We say these cell types provide 'interior mutability'
+//! (mutable via `&T`), in contrast with typical Rust types that exhibit 'inherited mutability'
+//! (mutable only via `&mut T`).
//!
-//! Cell types come in two flavors: `Cell<T>` and `RefCell<T>`. `Cell<T>` implements interior
-//! mutability by moving values in and out of the `Cell<T>`. To use references instead of values,
-//! one must use the `RefCell<T>` type, acquiring a write lock before mutating. `Cell<T>` provides
-//! methods to retrieve and change the current interior value:
+//! Cell types come in three flavors: `Cell<T>`, `RefCell<T>`, and `OnceCell<T>`. Each provides
+//! a different way of achieving safe interior mutability.
+//!
+//! ## `Cell<T>`
+//!
+//! [`Cell<T>`] implements interior mutability by moving values in and out of the cell. That is, an
+//! `&mut T` to the inner value can never be obtained, and the value itself cannot be directly
+//! obtained without replacing it with something else. Both of these rules ensure that there is
+//! never more than one reference pointing to the inner value. This type provides the following
+//! methods:
//!
//! - For types that implement [`Copy`], the [`get`](Cell::get) method retrieves the current
-//! interior value.
+//! interior value by duplicating it.
//! - For types that implement [`Default`], the [`take`](Cell::take) method replaces the current
//! interior value with [`Default::default()`] and returns the replaced value.
-//! - For all types, the [`replace`](Cell::replace) method replaces the current interior value and
-//! returns the replaced value and the [`into_inner`](Cell::into_inner) method consumes the
-//! `Cell<T>` and returns the interior value. Additionally, the [`set`](Cell::set) method
-//! replaces the interior value, dropping the replaced value.
+//! - All types have:
+//! - [`replace`](Cell::replace): replaces the current interior value and returns the replaced
+//! value.
+//!     - [`into_inner`](Cell::into_inner): consumes the `Cell<T>` and returns the
+//!       interior value.
+//!     - [`set`](Cell::set): replaces the interior value, dropping the replaced value.
+//!
+//! `Cell<T>` is typically used for simpler types where copying or moving values isn't too
+//! resource intensive (e.g. numbers), and should usually be preferred over other cell types when
+//! possible. For larger and non-copy types, `RefCell` provides some advantages.
+//!
+//! ## `RefCell<T>`
//!
-//! `RefCell<T>` uses Rust's lifetimes to implement 'dynamic borrowing', a process whereby one can
+//! [`RefCell<T>`] uses Rust's lifetimes to implement "dynamic borrowing", a process whereby one can
//! claim temporary, exclusive, mutable access to the inner value. Borrows for `RefCell<T>`s are
-//! tracked 'at runtime', unlike Rust's native reference types which are entirely tracked
-//! statically, at compile time. Because `RefCell<T>` borrows are dynamic it is possible to attempt
-//! to borrow a value that is already mutably borrowed; when this happens it results in thread
-//! panic.
+//! tracked at _runtime_, unlike Rust's native reference types which are entirely tracked
+//! statically, at compile time.
+//!
+//! An immutable reference to a `RefCell`'s inner value (`&T`) can be obtained with
+//! [`borrow`](`RefCell::borrow`), and a mutable borrow (`&mut T`) can be obtained with
+//! [`borrow_mut`](`RefCell::borrow_mut`). When these functions are called, they first verify that
+//! Rust's borrow rules will be satisfied: any number of immutable borrows are allowed or a
+//! single mutable borrow is allowed, but never both. If a borrow is attempted that would violate
+//! these rules, the thread will panic.
+//!
+//! The corresponding [`Sync`] version of `RefCell<T>` is [`RwLock<T>`].
+//!
+//! ## `OnceCell<T>`
+//!
+//! [`OnceCell<T>`] is somewhat of a hybrid of `Cell` and `RefCell` that works for values that
+//! typically only need to be set once. This means that a reference `&T` can be obtained without
+//! moving or copying the inner value (unlike `Cell`) but also without runtime checks (unlike
+//! `RefCell`). However, once set, its value cannot be updated unless you have a mutable
+//! reference to the `OnceCell`.
+//!
+//! `OnceCell` provides the following methods:
+//!
+//! - [`get`](OnceCell::get): obtain a reference to the inner value
+//! - [`set`](OnceCell::set): set the inner value if it is unset (returns a `Result`)
+//! - [`get_or_init`](OnceCell::get_or_init): return the inner value, initializing it if needed
+//! - [`get_mut`](OnceCell::get_mut): provide a mutable reference to the inner value, only available
+//! if you have a mutable reference to the cell itself.
+//!
+//! The corresponding [`Sync`] version of `OnceCell<T>` is [`OnceLock<T>`].
+//!
//! # When to choose interior mutability
//!
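Editor's note: a small sketch tying the rewritten module docs together. `OnceCell` is the fit when a value is computed at most once and then only read; the `Config` type below is hypothetical, and the snippet assumes Rust 1.70+, which this diff's stabilization targets:

```rust
use std::cell::OnceCell;

struct Config {
    raw: String,
    parsed: OnceCell<Vec<String>>, // filled in at most once
}

impl Config {
    fn parsed(&self) -> &[String] {
        // &self suffices: no moving or copying the value out (unlike Cell)
        // and no runtime borrow flag checked on every access (unlike RefCell).
        self.parsed
            .get_or_init(|| self.raw.split(',').map(str::to_owned).collect())
    }
}

fn main() {
    let cfg = Config { raw: "a,b,c".into(), parsed: OnceCell::new() };
    assert_eq!(cfg.parsed(), ["a", "b", "c"]);
    assert_eq!(cfg.parsed().len(), 3); // second call reuses the cached value
}
```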
@@ -188,6 +229,8 @@
//! [`Rc<T>`]: ../../std/rc/struct.Rc.html
//! [`RwLock<T>`]: ../../std/sync/struct.RwLock.html
//! [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
+//! [`OnceLock<T>`]: ../../std/sync/struct.OnceLock.html
+//! [`Sync`]: ../../std/marker/trait.Sync.html
//! [`atomic`]: crate::sync::atomic
#![stable(feature = "rust1", since = "1.0.0")]
@@ -202,13 +245,19 @@ use crate::ptr::{self, NonNull};
mod lazy;
mod once;
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
pub use lazy::LazyCell;
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
pub use once::OnceCell;
/// A mutable memory location.
///
+/// # Memory layout
+///
+/// `Cell<T>` has the same [memory layout and caveats as
+/// `UnsafeCell<T>`](UnsafeCell#memory-layout). In particular, this means that
+/// `Cell<T>` has the same in-memory representation as its inner type `T`.
+///
/// # Examples
///
/// In this example, you can see that `Cell<T>` enables mutation inside an
@@ -413,7 +462,7 @@ impl<T> Cell<T> {
mem::replace(unsafe { &mut *self.value.get() }, val)
}
- /// Unwraps the value.
+ /// Unwraps the value, consuming the cell.
///
/// # Examples
///
@@ -1767,7 +1816,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// `UnsafeCell<T>` opts-out of the immutability guarantee for `&T`: a shared reference
/// `&UnsafeCell<T>` may point to data that is being mutated. This is called "interior mutability".
///
-/// All other types that allow internal mutability, such as `Cell<T>` and `RefCell<T>`, internally
+/// All other types that allow internal mutability, such as [`Cell<T>`] and [`RefCell<T>`], internally
/// use `UnsafeCell` to wrap their data.
///
/// Note that only the immutability guarantee for shared references is affected by `UnsafeCell`. The
@@ -1963,7 +2012,7 @@ impl<T> UnsafeCell<T> {
UnsafeCell { value }
}
- /// Unwraps the value.
+ /// Unwraps the value, consuming the cell.
///
/// # Examples
///
@@ -2127,7 +2176,7 @@ impl<T> SyncUnsafeCell<T> {
Self { value: UnsafeCell { value } }
}
- /// Unwraps the value.
+ /// Unwraps the value, consuming the cell.
#[inline]
pub const fn into_inner(self) -> T {
self.value.into_inner()
diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs
index 65d12c25c..44adcfa1a 100644
--- a/library/core/src/cell/lazy.rs
+++ b/library/core/src/cell/lazy.rs
@@ -1,6 +1,13 @@
-use crate::cell::{Cell, OnceCell};
-use crate::fmt;
use crate::ops::Deref;
+use crate::{fmt, mem};
+
+use super::UnsafeCell;
+
+enum State<T, F> {
+ Uninit(F),
+ Init(T),
+ Poisoned,
+}
/// A value which is initialized on the first access.
///
@@ -11,7 +18,7 @@ use crate::ops::Deref;
/// # Examples
///
/// ```
-/// #![feature(once_cell)]
+/// #![feature(lazy_cell)]
///
/// use std::cell::LazyCell;
///
@@ -29,10 +36,9 @@ use crate::ops::Deref;
/// // 92
/// // 92
/// ```
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
pub struct LazyCell<T, F = fn() -> T> {
- cell: OnceCell<T>,
- init: Cell<Option<F>>,
+ state: UnsafeCell<State<T, F>>,
}
impl<T, F: FnOnce() -> T> LazyCell<T, F> {
@@ -41,7 +47,7 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
+ /// #![feature(lazy_cell)]
///
/// use std::cell::LazyCell;
///
@@ -52,9 +58,9 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// assert_eq!(&*lazy, "HELLO, WORLD!");
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
- pub const fn new(init: F) -> LazyCell<T, F> {
- LazyCell { cell: OnceCell::new(), init: Cell::new(Some(init)) }
+ #[unstable(feature = "lazy_cell", issue = "109736")]
+ pub const fn new(f: F) -> LazyCell<T, F> {
+ LazyCell { state: UnsafeCell::new(State::Uninit(f)) }
}
/// Forces the evaluation of this lazy value and returns a reference to
@@ -65,7 +71,7 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
+ /// #![feature(lazy_cell)]
///
/// use std::cell::LazyCell;
///
@@ -75,16 +81,71 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// assert_eq!(&*lazy, &92);
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[unstable(feature = "lazy_cell", issue = "109736")]
pub fn force(this: &LazyCell<T, F>) -> &T {
- this.cell.get_or_init(|| match this.init.take() {
- Some(f) => f(),
- None => panic!("`Lazy` instance has previously been poisoned"),
- })
+ // SAFETY:
+ // This invalidates any mutable references to the data. The resulting
+ // reference lives either until the end of the borrow of `this` (in the
+ // initialized case) or is invalidated in `really_init` (in the
+ // uninitialized case; `really_init` will create and return a fresh reference).
+ let state = unsafe { &*this.state.get() };
+ match state {
+ State::Init(data) => data,
+ // SAFETY: The state is uninitialized.
+ State::Uninit(_) => unsafe { LazyCell::really_init(this) },
+ State::Poisoned => panic!("LazyCell has previously been poisoned"),
+ }
+ }
+
+ /// # Safety
+ /// May only be called when the state is `Uninit`.
+ #[cold]
+ unsafe fn really_init(this: &LazyCell<T, F>) -> &T {
+ // SAFETY:
+ // This function is only called when the state is uninitialized,
+ // so no references to `state` can exist except for the reference
+ // in `force`, which is invalidated here and not accessed again.
+ let state = unsafe { &mut *this.state.get() };
+ // Temporarily mark the state as poisoned. This prevents reentrant
+ // accesses and correctly poisons the cell if the closure panicked.
+ let State::Uninit(f) = mem::replace(state, State::Poisoned) else { unreachable!() };
+
+ let data = f();
+
+ // SAFETY:
+ // If the closure accessed the cell through something like a reentrant
+ // mutex, but caught the panic resulting from the state being poisoned,
+ // the mutable borrow for `state` will be invalidated, so we need to
+ // go through the `UnsafeCell` pointer here. The state can only be
+ // poisoned at this point, so using `write` to skip the destructor
+ // of `State` should help the optimizer.
+ unsafe { this.state.get().write(State::Init(data)) };
+
+ // SAFETY:
+ // The previous references were invalidated by the `write` call above,
+ // so do a new shared borrow of the state instead.
+ let state = unsafe { &*this.state.get() };
+ let State::Init(data) = state else { unreachable!() };
+ data
+ }
+}
+
+impl<T, F> LazyCell<T, F> {
+ #[inline]
+ fn get(&self) -> Option<&T> {
+ // SAFETY:
+ // This is sound for the same reason as in `force`: once the state is
+ // initialized, it will not be mutably accessed again, so this reference
+ // will stay valid for the duration of the borrow to `self`.
+ let state = unsafe { &*self.state.get() };
+ match state {
+ State::Init(data) => Some(data),
+ _ => None,
+ }
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
type Target = T;
#[inline]
@@ -93,7 +154,7 @@ impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: Default> Default for LazyCell<T> {
/// Creates a new lazy value using `Default` as the initializing function.
#[inline]
@@ -102,9 +163,14 @@ impl<T: Default> Default for LazyCell<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: fmt::Debug, F> fmt::Debug for LazyCell<T, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
+ let mut d = f.debug_tuple("LazyCell");
+ match self.get() {
+ Some(data) => d.field(data),
+ None => d.field(&format_args!("<uninit>")),
+ };
+ d.finish()
}
}
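
For reference, a minimal sketch of the behavior `force`/`really_init` implement, assuming a nightly toolchain with the renamed `lazy_cell` feature gate from this patch: the closure runs once on first access, later accesses take the `State::Init` fast path, and a panicking initializer leaves the cell poisoned.

```
#![feature(lazy_cell)]
use std::cell::LazyCell;

fn main() {
    let lazy = LazyCell::new(|| {
        println!("initializing"); // runs exactly once
        92
    });
    assert_eq!(*lazy, 92); // first deref: `State::Uninit` -> `really_init`
    assert_eq!(*lazy, 92); // second deref: `State::Init` fast path
}
```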
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index f74e563f1..f7cd3ec5f 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -4,8 +4,10 @@ use crate::mem;
/// A cell which can be written to only once.
///
-/// Unlike [`RefCell`], a `OnceCell` only provides shared `&T` references to its value.
-/// Unlike [`Cell`], a `OnceCell` doesn't require copying or replacing the value to access it.
+/// This allows obtaining a shared `&T` reference to its inner value without copying or replacing
+/// it (unlike [`Cell`]), and without runtime borrow checks (unlike [`RefCell`]). However,
+/// only immutable references can be obtained unless one has a mutable reference to the cell
+/// itself.
///
/// For a thread-safe version of this struct, see [`std::sync::OnceLock`].
///
@@ -16,8 +18,6 @@ use crate::mem;
/// # Examples
///
/// ```
-/// #![feature(once_cell)]
-///
/// use std::cell::OnceCell;
///
/// let cell = OnceCell::new();
@@ -29,7 +29,7 @@ use crate::mem;
/// assert_eq!(value, "Hello, World!");
/// assert!(cell.get().is_some());
/// ```
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
pub struct OnceCell<T> {
// Invariant: written to at most once.
inner: UnsafeCell<Option<T>>,
@@ -39,7 +39,8 @@ impl<T> OnceCell<T> {
/// Creates a new empty cell.
#[inline]
#[must_use]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
+ #[rustc_const_stable(feature = "once_cell", since = "1.70.0")]
pub const fn new() -> OnceCell<T> {
OnceCell { inner: UnsafeCell::new(None) }
}
@@ -48,7 +49,7 @@ impl<T> OnceCell<T> {
///
/// Returns `None` if the cell is empty.
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn get(&self) -> Option<&T> {
// SAFETY: Safe due to `inner`'s invariant
unsafe { &*self.inner.get() }.as_ref()
@@ -58,7 +59,7 @@ impl<T> OnceCell<T> {
///
/// Returns `None` if the cell is empty.
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn get_mut(&mut self) -> Option<&mut T> {
self.inner.get_mut().as_mut()
}
@@ -73,8 +74,6 @@ impl<T> OnceCell<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::cell::OnceCell;
///
/// let cell = OnceCell::new();
@@ -86,7 +85,7 @@ impl<T> OnceCell<T> {
/// assert!(cell.get().is_some());
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn set(&self, value: T) -> Result<(), T> {
// SAFETY: Safe because we cannot have overlapping mutable borrows
let slot = unsafe { &*self.inner.get() };
@@ -117,8 +116,6 @@ impl<T> OnceCell<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::cell::OnceCell;
///
/// let cell = OnceCell::new();
@@ -128,7 +125,7 @@ impl<T> OnceCell<T> {
/// assert_eq!(value, &92);
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn get_or_init<F>(&self, f: F) -> &T
where
F: FnOnce() -> T,
@@ -153,7 +150,7 @@ impl<T> OnceCell<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
+ /// #![feature(once_cell_try)]
///
/// use std::cell::OnceCell;
///
@@ -166,7 +163,7 @@ impl<T> OnceCell<T> {
/// assert_eq!(value, Ok(&92));
/// assert_eq!(cell.get(), Some(&92))
/// ```
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[unstable(feature = "once_cell_try", issue = "109737")]
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
where
F: FnOnce() -> Result<T, E>,
@@ -199,8 +196,6 @@ impl<T> OnceCell<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::cell::OnceCell;
///
/// let cell: OnceCell<String> = OnceCell::new();
@@ -211,7 +206,7 @@ impl<T> OnceCell<T> {
/// assert_eq!(cell.into_inner(), Some("hello".to_string()));
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn into_inner(self) -> Option<T> {
// Because `into_inner` takes `self` by value, the compiler statically verifies
// that it is not currently borrowed. So it is safe to move out `Option<T>`.
@@ -227,8 +222,6 @@ impl<T> OnceCell<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::cell::OnceCell;
///
/// let mut cell: OnceCell<String> = OnceCell::new();
@@ -240,13 +233,13 @@ impl<T> OnceCell<T> {
/// assert_eq!(cell.get(), None);
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn take(&mut self) -> Option<T> {
mem::take(self).into_inner()
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T> Default for OnceCell<T> {
#[inline]
fn default() -> Self {
@@ -254,7 +247,7 @@ impl<T> Default for OnceCell<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.get() {
@@ -264,7 +257,7 @@ impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: Clone> Clone for OnceCell<T> {
#[inline]
fn clone(&self) -> OnceCell<T> {
@@ -279,7 +272,7 @@ impl<T: Clone> Clone for OnceCell<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: PartialEq> PartialEq for OnceCell<T> {
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -287,10 +280,11 @@ impl<T: PartialEq> PartialEq for OnceCell<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: Eq> Eq for OnceCell<T> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
+#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
impl<T> const From<T> for OnceCell<T> {
/// Creates a new `OnceCell<T>` which already contains the given `value`.
#[inline]
@@ -300,5 +294,5 @@ impl<T> const From<T> for OnceCell<T> {
}
// Just like for `Cell<T>` this isn't needed, but results in nicer error messages.
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T> !Sync for OnceCell<T> {}
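
A small usage sketch of the invariants the reworded docs describe, using only the APIs stabilized here: writes happen at most once, shared references come with no runtime borrow flag, and mutation requires exclusive access to the cell itself.

```
use std::cell::OnceCell;

fn main() {
    let mut cell = OnceCell::new();
    assert_eq!(cell.set(1), Ok(()));
    assert_eq!(cell.set(2), Err(2)); // written to at most once
    let a = cell.get().unwrap(); // plain `&T`, no RefCell-style guard
    let b = cell.get().unwrap();
    assert_eq!(*a + *b, 2);
    *cell.get_mut().unwrap() += 1; // `&mut self` is the only way to mutate
    assert_eq!(cell.get(), Some(&2));
}
```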
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 068637d1a..55331475a 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -231,7 +231,8 @@ pub trait PartialEq<Rhs: ?Sized = Self> {
}
}
-/// Derive macro generating an impl of the trait `PartialEq`.
+/// Derive macro generating an impl of the trait [`PartialEq`].
+/// The behavior of this macro is described in detail [here](PartialEq#derivable).
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, structural_match)]
@@ -297,7 +298,7 @@ pub trait Eq: PartialEq<Self> {
fn assert_receiver_is_total_eq(&self) {}
}
-/// Derive macro generating an impl of the trait `Eq`.
+/// Derive macro generating an impl of the trait [`Eq`].
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match, no_coverage)]
@@ -800,10 +801,7 @@ pub trait Ord: Eq + PartialOrd<Self> {
Self: Sized,
Self: ~const Destruct,
{
- match self.cmp(&other) {
- Ordering::Less | Ordering::Equal => other,
- Ordering::Greater => self,
- }
+ max_by(self, other, Ord::cmp)
}
/// Compares and returns the minimum of two values.
@@ -824,10 +822,7 @@ pub trait Ord: Eq + PartialOrd<Self> {
Self: Sized,
Self: ~const Destruct,
{
- match self.cmp(&other) {
- Ordering::Less | Ordering::Equal => self,
- Ordering::Greater => other,
- }
+ min_by(self, other, Ord::cmp)
}
/// Restrict a value to a certain interval.
@@ -865,7 +860,8 @@ pub trait Ord: Eq + PartialOrd<Self> {
}
}
-/// Derive macro generating an impl of the trait `Ord`.
+/// Derive macro generating an impl of the trait [`Ord`].
+/// The behavior of this macro is described in detail [here](Ord#derivable).
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics)]
@@ -1144,7 +1140,8 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> {
}
}
-/// Derive macro generating an impl of the trait `PartialOrd`.
+/// Derive macro generating an impl of the trait [`PartialOrd`].
+/// The behavior of this macro is described in detail [here](PartialOrd#derivable).
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics)]
@@ -1190,7 +1187,12 @@ pub const fn min<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn min_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+{
match compare(&v1, &v2) {
Ordering::Less | Ordering::Equal => v1,
Ordering::Greater => v2,
@@ -1212,8 +1214,14 @@ pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
- min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn min_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+ K: ~const Destruct,
+{
+ min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2)))
}
/// Compares and returns the maximum of two values.
@@ -1254,7 +1262,12 @@ pub const fn max<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn max_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+{
match compare(&v1, &v2) {
Ordering::Less | Ordering::Equal => v2,
Ordering::Greater => v1,
@@ -1276,8 +1289,14 @@ pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
#[inline]
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
-pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
- max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+pub const fn max_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T
+where
+ T: ~const Destruct,
+ F: ~const Destruct,
+ K: ~const Destruct,
+{
+ max_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2)))
}
// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
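
A quick check of the tie-breaking rules these new `const` bodies preserve (runnable on stable, where the functions are plain `fn`s): `min_by`/`min_by_key` return the first argument on ties, `max_by`/`max_by_key` the second.

```
use std::cmp::{max_by, min_by_key};

fn main() {
    // `min_by_key` is `min_by` with a key-extracting comparator.
    assert_eq!(min_by_key("spam", "egg", |s: &&str| s.len()), "egg");
    // On a tie (|1| == |-1|), `max_by` returns its second argument.
    assert_eq!(max_by(1, -1, |a: &i32, b: &i32| a.abs().cmp(&b.abs())), -1);
}
```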
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index 805354be0..5888e2960 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -722,6 +722,7 @@ where
///
/// That is, this conversion is whatever the implementation of
/// <code>[From]&lt;T&gt; for U</code> chooses to do.
+ #[inline]
fn into(self) -> U {
U::from(self)
}
@@ -763,6 +764,7 @@ where
{
type Error = U::Error;
+ #[inline]
fn try_into(self) -> Result<U, U::Error> {
U::try_from(self)
}
@@ -778,6 +780,7 @@ where
{
type Error = Infallible;
+ #[inline]
fn try_from(value: U) -> Result<Self, Self::Error> {
Ok(U::into(value))
}
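
The `#[inline]` hints land on the blanket impls that make `From` the one trait users implement. A sketch with illustrative `Celsius`/`Fahrenheit` types (not from this patch), showing that `.into()` is exactly the forwarding call annotated above:

```
struct Celsius(f64);
struct Fahrenheit(f64);

impl From<Celsius> for Fahrenheit {
    fn from(c: Celsius) -> Self {
        Fahrenheit(c.0 * 9.0 / 5.0 + 32.0)
    }
}

fn main() {
    // Resolves through the blanket `impl Into<U> for T where U: From<T>`,
    // which simply calls `Fahrenheit::from(self)`.
    let f: Fahrenheit = Celsius(100.0).into();
    assert_eq!(f.0, 212.0);
}
```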
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index 4da7c3234..a74a56bc5 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -172,7 +172,18 @@ impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0"
#[stable(feature = "float_from_bool", since = "1.68.0")]
#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
impl const From<bool> for f32 {
- /// Converts `bool` to `f32` losslessly.
+ /// Converts `bool` to `f32` losslessly. The resulting value is positive
+ /// `0.0` for `false` and `1.0` for `true` values.
+ ///
+ /// # Examples
+ /// ```
+ /// let x: f32 = false.into();
+ /// assert_eq!(x, 0.0);
+ /// assert!(x.is_sign_positive());
+ ///
+ /// let y: f32 = true.into();
+ /// assert_eq!(y, 1.0);
+ /// ```
#[inline]
fn from(small: bool) -> Self {
small as u8 as Self
@@ -181,7 +192,18 @@ impl const From<bool> for f32 {
#[stable(feature = "float_from_bool", since = "1.68.0")]
#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
impl const From<bool> for f64 {
- /// Converts `bool` to `f64` losslessly.
+ /// Converts `bool` to `f64` losslessly. The resulting value is positive
+ /// `0.0` for `false` and `1.0` for `true` values.
+ ///
+ /// # Examples
+ /// ```
+ /// let x: f64 = false.into();
+ /// assert_eq!(x, 0.0);
+ /// assert!(x.is_sign_positive());
+ ///
+ /// let y: f64 = true.into();
+ /// assert_eq!(y, 1.0);
+ /// ```
#[inline]
fn from(small: bool) -> Self {
small as u8 as Self
diff --git a/library/core/src/error.rs b/library/core/src/error.rs
index d4103183c..11cb08275 100644
--- a/library/core/src/error.rs
+++ b/library/core/src/error.rs
@@ -28,7 +28,7 @@ use crate::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Error")]
#[rustc_has_incoherent_inherent_impls]
-#[cfg_attr(not(bootstrap), allow(multiple_supertrait_upcastable))]
+#[allow(multiple_supertrait_upcastable)]
pub trait Error: Debug + Display {
/// The lower-level source of this error, if any.
///
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index fe8abdf7f..4a5306cca 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -172,6 +172,7 @@ impl fmt::Debug for CStr {
#[stable(feature = "cstr_default", since = "1.10.0")]
impl Default for &CStr {
+ #[inline]
fn default() -> Self {
const SLICE: &[c_char] = &[0];
// SAFETY: `SLICE` is indeed pointing to a valid nul-terminated string.
@@ -623,6 +624,7 @@ impl CStr {
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for CStr {
+ #[inline]
fn eq(&self, other: &CStr) -> bool {
self.to_bytes().eq(other.to_bytes())
}
@@ -631,12 +633,14 @@ impl PartialEq for CStr {
impl Eq for CStr {}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for CStr {
+ #[inline]
fn partial_cmp(&self, other: &CStr) -> Option<Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for CStr {
+ #[inline]
fn cmp(&self, other: &CStr) -> Ordering {
self.to_bytes().cmp(&other.to_bytes())
}
@@ -646,6 +650,7 @@ impl Ord for CStr {
impl ops::Index<ops::RangeFrom<usize>> for CStr {
type Output = CStr;
+ #[inline]
fn index(&self, index: ops::RangeFrom<usize>) -> &CStr {
let bytes = self.to_bytes_with_nul();
// we need to manually check the starting index to account for the null
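
These newly-`#[inline]` comparison and indexing impls are thin wrappers over `to_bytes()`; a small sketch of the observable behavior, using the stable `CStr` constructors:

```
use std::ffi::CStr;

fn main() {
    let hi = CStr::from_bytes_with_nul(b"hi\0").unwrap();
    let hello = CStr::from_bytes_with_nul(b"hello\0").unwrap();
    // Ordering is byte-wise over `to_bytes()`, i.e. without the nul.
    assert!(hello < hi);
    // Slicing from the front keeps the nul terminator in range.
    assert_eq!(&hi[1..], CStr::from_bytes_with_nul(b"i\0").unwrap());
}
```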
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index 27f665904..c4f554c8c 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -615,12 +615,15 @@ impl<'f> Drop for VaListImpl<'f> {
extern "rust-intrinsic" {
/// Destroy the arglist `ap` after initialization with `va_start` or
/// `va_copy`.
+ #[rustc_nounwind]
fn va_end(ap: &mut VaListImpl<'_>);
/// Copies the current location of arglist `src` to the arglist `dst`.
+ #[rustc_nounwind]
fn va_copy<'f>(dest: *mut VaListImpl<'f>, src: &VaListImpl<'f>);
/// Loads an argument of type `T` from the `va_list` `ap` and increment the
/// argument `ap` points to.
+ #[rustc_nounwind]
fn va_arg<T: sealed_trait::VaArgSafe>(ap: &mut VaListImpl<'_>) -> T;
}
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index c9821bf81..fcda097f0 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -267,7 +267,7 @@ extern "C" {
/// family of functions. It contains a function to format the given value. At
/// compile time it is ensured that the function and the value have the correct
/// types, and then this struct is used to canonicalize arguments to one type.
-#[cfg_attr(not(bootstrap), lang = "format_argument")]
+#[lang = "format_argument"]
#[derive(Copy, Clone)]
#[allow(missing_debug_implementations)]
#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
@@ -280,7 +280,7 @@ pub struct ArgumentV1<'a> {
/// This struct represents the unsafety of constructing an `Arguments`.
/// It exists, rather than an unsafe function, in order to simplify the expansion
/// of `format_args!(..)` and reduce the scope of the `unsafe` block.
-#[cfg_attr(not(bootstrap), lang = "format_unsafe_arg")]
+#[lang = "format_unsafe_arg"]
#[allow(missing_debug_implementations)]
#[doc(hidden)]
#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
@@ -392,8 +392,31 @@ enum FlagV1 {
}
impl<'a> Arguments<'a> {
+ #[doc(hidden)]
+ #[inline]
+ #[unstable(feature = "fmt_internals", issue = "none")]
+ #[rustc_const_unstable(feature = "const_fmt_arguments_new", issue = "none")]
+ pub const fn new_const(pieces: &'a [&'static str]) -> Self {
+ if pieces.len() > 1 {
+ panic!("invalid args");
+ }
+ Arguments { pieces, fmt: None, args: &[] }
+ }
+
/// When using the format_args!() macro, this function is used to generate the
/// Arguments structure.
+ #[cfg(not(bootstrap))]
+ #[doc(hidden)]
+ #[inline]
+ #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+ pub fn new_v1(pieces: &'a [&'static str], args: &'a [ArgumentV1<'a>]) -> Arguments<'a> {
+ if pieces.len() < args.len() || pieces.len() > args.len() + 1 {
+ panic!("invalid args");
+ }
+ Arguments { pieces, fmt: None, args }
+ }
+
+ #[cfg(bootstrap)]
#[doc(hidden)]
#[inline]
#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
@@ -417,8 +440,7 @@ impl<'a> Arguments<'a> {
#[doc(hidden)]
#[inline]
#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
- #[rustc_const_unstable(feature = "const_fmt_arguments_new", issue = "none")]
- pub const fn new_v1_formatted(
+ pub fn new_v1_formatted(
pieces: &'a [&'static str],
args: &'a [ArgumentV1<'a>],
fmt: &'a [rt::v1::Argument],
@@ -475,7 +497,7 @@ impl<'a> Arguments<'a> {
/// ```
///
/// [`format()`]: ../../std/fmt/fn.format.html
-#[cfg_attr(not(bootstrap), lang = "format_arguments")]
+#[lang = "format_arguments"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
pub struct Arguments<'a> {
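
The invariant `new_v1` now checks, `args.len() <= pieces.len() <= args.len() + 1`, reflects how `format_args!` interleaves literal pieces with placeholders (the exact piece splitting is a macro implementation detail). A sanity check through the stable surface:

```
use std::fmt;

fn main() {
    // "a{}b{}c": two placeholders interleaved with three literal pieces
    // ("a", "b", "c"), the most `new_v1` accepts for two args.
    let s = fmt::format(format_args!("a{}b{}c", 1, 2));
    assert_eq!(s, "a1b2c");
}
```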
diff --git a/library/core/src/fmt/rt/v1.rs b/library/core/src/fmt/rt/v1.rs
index 11a50951a..6d70796f7 100644
--- a/library/core/src/fmt/rt/v1.rs
+++ b/library/core/src/fmt/rt/v1.rs
@@ -5,7 +5,7 @@
//! these can be statically allocated and are slightly optimized for the runtime
#![allow(missing_debug_implementations)]
-#[cfg_attr(not(bootstrap), lang = "format_placeholder")]
+#[lang = "format_placeholder"]
#[derive(Copy, Clone)]
// FIXME: Rename this to Placeholder
pub struct Argument {
@@ -37,7 +37,7 @@ impl Argument {
}
/// Possible alignments that can be requested as part of a formatting directive.
-#[cfg_attr(not(bootstrap), lang = "format_alignment")]
+#[lang = "format_alignment"]
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Alignment {
/// Indication that contents should be left-aligned.
@@ -51,7 +51,7 @@ pub enum Alignment {
}
/// Used by [width](https://doc.rust-lang.org/std/fmt/#width) and [precision](https://doc.rust-lang.org/std/fmt/#precision) specifiers.
-#[cfg_attr(not(bootstrap), lang = "format_count")]
+#[lang = "format_count"]
#[derive(Copy, Clone)]
pub enum Count {
/// Specified with a literal number, stores the value
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
index 46cbcd435..04f02d47f 100644
--- a/library/core/src/future/mod.rs
+++ b/library/core/src/future/mod.rs
@@ -67,14 +67,10 @@ pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
unsafe { &mut *cx.0.as_ptr().cast() }
}
-// FIXME(swatinem): This fn is currently needed to work around shortcomings
-// in type and lifetime inference.
-// See the comment at the bottom of `LoweringContext::make_async_expr` and
-// <https://github.com/rust-lang/rust/issues/104826>.
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[inline]
-#[lang = "identity_future"]
+#[cfg_attr(bootstrap, lang = "identity_future")]
pub const fn identity_future<O, Fut: Future<Output = O>>(f: Fut) -> Fut {
f
}
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 71a0d1825..4e7bae7bc 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -158,7 +158,7 @@ mod sip;
///
/// Implementations of `hash` should ensure that the data they
/// pass to the `Hasher` are prefix-free. That is,
-/// unequal values should cause two different sequences of values to be written,
+/// values which are not equal should cause two different sequences of values to be written,
/// and neither of the two sequences should be a prefix of the other.
///
/// For example, the standard implementation of [`Hash` for `&str`][impl] passes an extra
@@ -834,7 +834,7 @@ mod impls {
#[inline]
fn hash_slice<H: ~const Hasher>(data: &[$ty], state: &mut H) {
- let newlen = data.len() * mem::size_of::<$ty>();
+ let newlen = mem::size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
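
The prefix-free requirement the reworded doc states is easy to see with tuples of strings: if `str` did not write its extra 0xFF byte, `("ab", "c")` and `("a", "bc")` would feed the hasher identical byte streams. A sketch (distinct hashes are overwhelmingly likely here, not strictly guaranteed):

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_of(v: impl Hash) -> u64 {
    let mut h = DefaultHasher::new();
    v.hash(&mut h);
    h.finish()
}

fn main() {
    // The 0xFF byte written after each `str` keeps these streams distinct.
    assert_ne!(hash_of(("ab", "c")), hash_of(("a", "bc")));
}
```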
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index ee13dae60..a20556577 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -228,7 +228,7 @@ pub fn spin_loop() {
/// This _immediately_ precludes any direct use of this function for cryptographic or security
/// purposes.
///
-/// While not suitable in those mission-critical cases, `back_box`'s functionality can generally be
+/// While not suitable in those mission-critical cases, `black_box`'s functionality can generally be
/// relied upon for benchmarking, and should be used there. It will try to ensure that the
/// compiler doesn't optimize away part of the intended test code based on context. For
/// example:
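
A minimal benchmarking-shaped sketch of that guidance, using the stable `std::hint::black_box`: the hint keeps the input and the accumulated result opaque, so the loop cannot be constant-folded away.

```
use std::hint::black_box;

fn main() {
    let mut acc = 0u64;
    for i in 0..1_000u64 {
        // `black_box(i)` hides the induction variable from the optimizer...
        acc = acc.wrapping_add(black_box(i) * 3);
    }
    // ...and consuming the result keeps the whole loop observable.
    black_box(acc);
}
```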
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 18a90599c..a7c100e1b 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -84,6 +84,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Relaxed`] as both the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_relaxed_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -91,6 +92,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Relaxed`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_relaxed_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -98,6 +100,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Relaxed`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_relaxed_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -105,6 +108,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Acquire`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_acquire_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -112,6 +116,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Acquire`] as both the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -119,6 +124,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Acquire`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_acquire_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -126,6 +132,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Release`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_release_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -133,6 +140,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Release`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_release_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -140,6 +148,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::Release`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -147,6 +156,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::AcqRel`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_acqrel_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -154,6 +164,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::AcqRel`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_acqrel_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -161,6 +172,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::AcqRel`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_acqrel_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -168,6 +180,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::SeqCst`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_seqcst_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -175,6 +188,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::SeqCst`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -182,6 +196,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange` method by passing
/// [`Ordering::SeqCst`] as both the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange`].
+ #[rustc_nounwind]
pub fn atomic_cxchg_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
@@ -190,6 +205,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Relaxed`] as both the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_relaxed_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -197,6 +213,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Relaxed`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_relaxed_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -204,6 +221,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Relaxed`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_relaxed_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -211,6 +229,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Acquire`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_acquire_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -218,6 +237,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Acquire`] as both the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -225,6 +245,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Acquire`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_acquire_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -232,6 +253,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Release`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_release_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -239,6 +261,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Release`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_release_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -246,6 +269,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::Release`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -253,6 +277,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::AcqRel`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_acqrel_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -260,6 +285,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::AcqRel`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_acqrel_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -267,6 +293,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::AcqRel`] and [`Ordering::SeqCst`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_acqrel_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -274,6 +301,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::SeqCst`] and [`Ordering::Relaxed`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_seqcst_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -281,6 +309,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::SeqCst`] and [`Ordering::Acquire`] as the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Stores a value if the current value is the same as the `old` value.
///
@@ -288,6 +317,7 @@ extern "rust-intrinsic" {
/// [`atomic`] types via the `compare_exchange_weak` method by passing
/// [`Ordering::SeqCst`] as both the success and failure parameters.
/// For example, [`AtomicBool::compare_exchange_weak`].
+ #[rustc_nounwind]
pub fn atomic_cxchgweak_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
/// Loads the current value of the pointer.
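
Each intrinsic above names its stabilized counterpart; as a concrete instance, the `relaxed_relaxed` variants correspond to `compare_exchange` with `Relaxed` for both the success and failure orderings:

```
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
    let flag = AtomicBool::new(false);
    // First CAS succeeds and returns the previous value.
    assert_eq!(
        flag.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed),
        Ok(false)
    );
    // Second CAS fails and reports the value it observed.
    assert_eq!(
        flag.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed),
        Err(true)
    );
}
```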
@@ -295,19 +325,23 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `load` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
+ #[rustc_nounwind]
pub fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
/// Loads the current value of the pointer.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `load` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
+ #[rustc_nounwind]
pub fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
/// Loads the current value of the pointer.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `load` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
+ #[rustc_nounwind]
pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
+ #[rustc_nounwind]
pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
/// Stores the value at the specified memory location.
@@ -315,19 +349,23 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `store` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::store`].
+ #[rustc_nounwind]
pub fn atomic_store_seqcst<T: Copy>(dst: *mut T, val: T);
/// Stores the value at the specified memory location.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `store` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::store`].
+ #[rustc_nounwind]
pub fn atomic_store_release<T: Copy>(dst: *mut T, val: T);
/// Stores the value at the specified memory location.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `store` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
+ #[rustc_nounwind]
pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
+ #[rustc_nounwind]
pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
/// Stores the value at the specified memory location, returning the old value.
@@ -335,30 +373,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `swap` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::swap`].
+ #[rustc_nounwind]
pub fn atomic_xchg_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Stores the value at the specified memory location, returning the old value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `swap` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::swap`].
+ #[rustc_nounwind]
pub fn atomic_xchg_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Stores the value at the specified memory location, returning the old value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `swap` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::swap`].
+ #[rustc_nounwind]
pub fn atomic_xchg_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Stores the value at the specified memory location, returning the old value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `swap` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::swap`].
+ #[rustc_nounwind]
pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Stores the value at the specified memory location, returning the old value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `swap` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::swap`].
+ #[rustc_nounwind]
pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Adds to the current value, returning the previous value.
@@ -366,30 +409,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_add` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ #[rustc_nounwind]
pub fn atomic_xadd_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Adds to the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_add` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ #[rustc_nounwind]
pub fn atomic_xadd_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Adds to the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_add` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ #[rustc_nounwind]
pub fn atomic_xadd_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Adds to the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_add` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ #[rustc_nounwind]
pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Adds to the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_add` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ #[rustc_nounwind]
pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Subtracts from the current value, returning the previous value.
@@ -397,30 +445,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_sub` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ #[rustc_nounwind]
pub fn atomic_xsub_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Subtracts from the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_sub` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ #[rustc_nounwind]
pub fn atomic_xsub_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Subtracts from the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_sub` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ #[rustc_nounwind]
pub fn atomic_xsub_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Subtracts from the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_sub` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ #[rustc_nounwind]
pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Subtracts from the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_sub` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ #[rustc_nounwind]
pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise and with the current value, returning the previous value.
@@ -428,30 +481,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_and` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ #[rustc_nounwind]
pub fn atomic_and_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise and with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_and` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ #[rustc_nounwind]
pub fn atomic_and_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise and with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_and` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ #[rustc_nounwind]
pub fn atomic_and_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise and with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_and` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ #[rustc_nounwind]
pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise and with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_and` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ #[rustc_nounwind]
pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise nand with the current value, returning the previous value.
@@ -459,30 +517,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`AtomicBool`] type via the `fetch_nand` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ #[rustc_nounwind]
pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise nand with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`AtomicBool`] type via the `fetch_nand` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ #[rustc_nounwind]
pub fn atomic_nand_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise nand with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`AtomicBool`] type via the `fetch_nand` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ #[rustc_nounwind]
pub fn atomic_nand_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise nand with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`AtomicBool`] type via the `fetch_nand` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ #[rustc_nounwind]
pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise nand with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`AtomicBool`] type via the `fetch_nand` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ #[rustc_nounwind]
pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise or with the current value, returning the previous value.
@@ -490,30 +553,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_or` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ #[rustc_nounwind]
pub fn atomic_or_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise or with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_or` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ #[rustc_nounwind]
pub fn atomic_or_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise or with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_or` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ #[rustc_nounwind]
pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise or with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_or` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ #[rustc_nounwind]
pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise or with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_or` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ #[rustc_nounwind]
pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise xor with the current value, returning the previous value.
@@ -521,30 +589,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_xor` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ #[rustc_nounwind]
pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise xor with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_xor` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ #[rustc_nounwind]
pub fn atomic_xor_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise xor with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_xor` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ #[rustc_nounwind]
pub fn atomic_xor_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise xor with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_xor` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ #[rustc_nounwind]
pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Bitwise xor with the current value, returning the previous value.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] types via the `fetch_xor` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ #[rustc_nounwind]
pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
@@ -552,30 +625,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_max_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_max_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_max_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_max` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
@@ -583,18 +661,21 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_min_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_min_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_min_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using a signed comparison.
///
@@ -607,6 +688,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] signed integer types via the `fetch_min` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
@@ -614,30 +696,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_umin_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_umin_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_umin_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Minimum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_min` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ #[rustc_nounwind]
pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
@@ -645,30 +732,35 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_umax_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_umax_acquire<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_umax_release<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
/// Maximum with the current value using an unsigned comparison.
///
/// The stabilized version of this intrinsic is available on the
/// [`atomic`] unsigned integer types via the `fetch_max` method by passing
/// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ #[rustc_nounwind]
pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
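As a reference point for the `fetch_min`/`fetch_max` docs above, a minimal sketch of the stabilized API on stable std (values are illustrative):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let a = AtomicU32::new(10);
    // fetch_min stores min(current, arg) and returns the previous value.
    assert_eq!(a.fetch_min(5, Ordering::SeqCst), 10);
    assert_eq!(a.load(Ordering::SeqCst), 5);
    // fetch_max is the symmetric operation.
    assert_eq!(a.fetch_max(42, Ordering::SeqCst), 5);
    assert_eq!(a.load(Ordering::SeqCst), 42);
}
```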
/// An atomic fence.
@@ -676,24 +768,28 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::SeqCst`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_fence_seqcst();
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::Acquire`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_fence_acquire();
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::Release`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_fence_release();
/// An atomic fence.
///
/// The stabilized version of this intrinsic is available in
/// [`atomic::fence`] by passing [`Ordering::AcqRel`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_fence_acqrel();
/// A compiler-only memory barrier.
@@ -706,6 +802,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::SeqCst`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_singlethreadfence_seqcst();
/// A compiler-only memory barrier.
///
@@ -717,6 +814,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::Acquire`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_singlethreadfence_acquire();
/// A compiler-only memory barrier.
///
@@ -728,6 +826,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::Release`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_singlethreadfence_release();
/// A compiler-only memory barrier.
///
@@ -739,6 +838,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is available in
/// [`atomic::compiler_fence`] by passing [`Ordering::AcqRel`]
/// as the `order`.
+ #[rustc_nounwind]
pub fn atomic_singlethreadfence_acqrel();
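For orientation, the stabilized fronts of the fence intrinsics are free functions in `std::sync::atomic`; a minimal sketch:

```rust
use std::sync::atomic::{compiler_fence, fence, Ordering};

fn main() {
    // Hardware + compiler barrier: synchronizes with atomic operations
    // and fences in other threads.
    fence(Ordering::SeqCst);
    // Compiler-only barrier: restricts reordering by the compiler but
    // emits no CPU fence instruction.
    compiler_fence(Ordering::SeqCst);
}
```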
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
@@ -750,6 +850,7 @@ extern "rust-intrinsic" {
 /// ranging from (0) - no locality, to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn prefetch_read_data<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
@@ -760,6 +861,7 @@ extern "rust-intrinsic" {
 /// ranging from (0) - no locality, to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn prefetch_write_data<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
@@ -770,6 +872,7 @@ extern "rust-intrinsic" {
 /// ranging from (0) - no locality, to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn prefetch_read_instruction<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a no-op.
@@ -780,6 +883,7 @@ extern "rust-intrinsic" {
 /// ranging from (0) - no locality, to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn prefetch_write_instruction<T>(data: *const T, locality: i32);
/// Magic intrinsic that derives its meaning from attributes
@@ -792,6 +896,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic should not be used outside of the compiler.
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn rustc_peek<T>(_: T) -> T;
/// Aborts the execution of the process.
@@ -810,6 +915,7 @@ extern "rust-intrinsic" {
/// process will probably terminate with a signal like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or
/// `SIGBUS`. The precise behaviour is not guaranteed and not stable.
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn abort() -> !;
/// Informs the optimizer that this point in the code is not reachable,
@@ -821,6 +927,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
#[rustc_const_stable(feature = "const_unreachable_unchecked", since = "1.57.0")]
+ #[rustc_nounwind]
pub fn unreachable() -> !;
/// Informs the optimizer that a condition is always true.
@@ -834,6 +941,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assume", issue = "76972")]
+ #[rustc_nounwind]
pub fn assume(b: bool);
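A sketch of the stabilized counterpart of `unreachable`, `core::hint::unreachable_unchecked`; the nonzero-divisor invariant here is an assumption of this example, not part of the intrinsic's contract:

```rust
/// SAFETY: callers must guarantee `b != 0`.
unsafe fn div_nonzero(a: u32, b: u32) -> u32 {
    if b == 0 {
        // Telling the optimizer this branch is impossible lets it drop
        // the zero check entirely; actually reaching it would be UB.
        core::hint::unreachable_unchecked()
    }
    a / b
}

fn main() {
    assert_eq!(unsafe { div_nonzero(10, 2) }, 5);
}
```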
 /// Hints to the compiler that the branch condition is likely to be true.
@@ -849,6 +957,7 @@ extern "rust-intrinsic" {
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn likely(b: bool) -> bool;
 /// Hints to the compiler that the branch condition is likely to be false.
@@ -864,11 +973,13 @@ extern "rust-intrinsic" {
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn unlikely(b: bool) -> bool;
/// Executes a breakpoint trap, for inspection by a debugger.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn breakpoint();
/// The size of a type in bytes.
@@ -884,6 +995,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`core::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn size_of<T>() -> usize;
/// The minimum alignment of a type.
@@ -896,23 +1008,27 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`core::mem::align_of`].
#[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn min_align_of<T>() -> usize;
/// The preferred alignment of a type.
///
/// This intrinsic does not have a stable counterpart.
/// It's "tracking issue" is [#91971](https://github.com/rust-lang/rust/issues/91971).
#[rustc_const_unstable(feature = "const_pref_align_of", issue = "91971")]
+ #[rustc_nounwind]
pub fn pref_align_of<T>() -> usize;
/// The size of the referenced value in bytes.
///
/// The stabilized version of this intrinsic is [`mem::size_of_val`].
#[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
+ #[rustc_nounwind]
pub fn size_of_val<T: ?Sized>(_: *const T) -> usize;
/// The required alignment of the referenced value.
///
/// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
#[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
+ #[rustc_nounwind]
pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize;
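The stabilized fronts of the size/alignment intrinsics live in `core::mem`; a quick sketch (the alignment value is a platform-typical assumption):

```rust
use std::mem;

fn main() {
    assert_eq!(mem::size_of::<u64>(), 8);
    assert_eq!(mem::align_of::<u32>(), 4);
    // size_of_val also works for unsized values such as slices.
    let xs: &[u32] = &[1, 2, 3];
    assert_eq!(mem::size_of_val(xs), 12);
}
```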
/// Gets a static string slice containing the name of a type.
@@ -925,6 +1041,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`core::any::type_name`].
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn type_name<T: ?Sized>() -> &'static str;
/// Gets an identifier which is globally unique to the specified type. This
@@ -939,6 +1056,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn type_id<T: ?Sized + 'static>() -> u64;
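A sketch of the stabilized fronts, `core::any::type_name` and `core::any::TypeId::of`:

```rust
use std::any::{type_name, TypeId};

fn main() {
    // type_name is best-effort and intended for diagnostics only.
    assert!(type_name::<Option<u8>>().contains("Option"));
    // TypeId is globally unique per 'static type.
    assert_eq!(TypeId::of::<u8>(), TypeId::of::<u8>());
    assert_ne!(TypeId::of::<u8>(), TypeId::of::<i8>());
}
```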
/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
@@ -947,6 +1065,7 @@ extern "rust-intrinsic" {
/// This intrinsic does not have a stable counterpart.
#[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn assert_inhabited<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` does not permit
@@ -955,6 +1074,7 @@ extern "rust-intrinsic" {
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn assert_zero_valid<T>();
 /// A guard for `std::mem::uninitialized`. This will statically either panic or do nothing.
@@ -962,6 +1082,7 @@ extern "rust-intrinsic" {
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn assert_mem_uninitialized_valid<T>();
/// Gets a reference to a static `Location` indicating where it was called.
@@ -974,6 +1095,7 @@ extern "rust-intrinsic" {
/// Consider using [`core::panic::Location::caller`] instead.
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn caller_location() -> &'static crate::panic::Location<'static>;
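A sketch of the stabilized surface, `core::panic::Location::caller`, which reports the call site of a `#[track_caller]` function:

```rust
#[track_caller]
fn whence() -> &'static core::panic::Location<'static> {
    core::panic::Location::caller()
}

fn main() {
    let loc = whence();
    // Reports the file/line of the call site in main, not inside whence.
    println!("called from {}:{}", loc.file(), loc.line());
}
```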
/// Moves a value out of scope without running drop glue.
@@ -987,6 +1109,7 @@ extern "rust-intrinsic" {
/// any safety invariants.
#[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn forget<T: ?Sized>(_: T);
/// Reinterprets the bits of a value of one type as another type.
@@ -1250,6 +1373,7 @@ extern "rust-intrinsic" {
#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_transmute", since = "1.56.0")]
#[rustc_diagnostic_item = "transmute"]
+ #[rustc_nounwind]
pub fn transmute<Src, Dst>(src: Src) -> Dst;
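For illustration, the stabilized `mem::transmute` reinterprets bits between same-sized types; where a dedicated conversion exists (here `f32::to_bits`) it should be preferred:

```rust
fn main() {
    // Reinterpret the bits of an f32 as a u32 (both are 4 bytes).
    let bits: u32 = unsafe { std::mem::transmute(1.0f32) };
    assert_eq!(bits, 0x3f80_0000);
    // Safe, dedicated equivalent:
    assert_eq!(1.0f32.to_bits(), 0x3f80_0000);
}
```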
/// Returns `true` if the actual type given as `T` requires drop
@@ -1267,6 +1391,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
#[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn needs_drop<T: ?Sized>() -> bool;
/// Calculates the offset from a pointer.
@@ -1284,6 +1409,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`pointer::offset`].
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[rustc_nounwind]
pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
/// Calculates the offset from a pointer, potentially wrapping.
@@ -1301,6 +1427,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`pointer::wrapping_offset`].
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[rustc_nounwind]
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
/// Masks out bits of the pointer according to a mask.
@@ -1312,6 +1439,7 @@ extern "rust-intrinsic" {
///
/// Consider using [`pointer::mask`] instead.
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn ptr_mask<T>(ptr: *const T, mask: usize) -> *const T;
/// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
@@ -1322,6 +1450,7 @@ extern "rust-intrinsic" {
/// unless size is equal to zero.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: usize);
/// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
/// a size of `count * size_of::<T>()` and an alignment of
@@ -1331,6 +1460,7 @@ extern "rust-intrinsic" {
/// unless size is equal to zero.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
/// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
/// size of `count * size_of::<T>()` and an alignment of
@@ -1340,158 +1470,187 @@ extern "rust-intrinsic" {
/// unless size is equal to zero.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
/// Performs a volatile load from the `src` pointer.
///
/// The stabilized version of this intrinsic is [`core::ptr::read_volatile`].
+ #[rustc_nounwind]
pub fn volatile_load<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
///
/// The stabilized version of this intrinsic is [`core::ptr::write_volatile`].
+ #[rustc_nounwind]
pub fn volatile_store<T>(dst: *mut T, val: T);
 /// Performs a volatile load from the `src` pointer.
/// The pointer is not required to be aligned.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn unaligned_volatile_load<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
/// The pointer is not required to be aligned.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
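A sketch of the stabilized counterparts `ptr::read_volatile`/`ptr::write_volatile` (the aligned variants; the unaligned intrinsics above have no stable front):

```rust
use std::ptr;

fn main() {
    let mut x = 0u32;
    // Volatile accesses are never elided and never reordered relative
    // to other volatile accesses.
    unsafe {
        ptr::write_volatile(&mut x, 42);
        assert_eq!(ptr::read_volatile(&x), 42);
    }
}
```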
/// Returns the square root of an `f32`
///
/// The stabilized version of this intrinsic is
/// [`f32::sqrt`](../../std/primitive.f32.html#method.sqrt)
+ #[rustc_nounwind]
pub fn sqrtf32(x: f32) -> f32;
/// Returns the square root of an `f64`
///
/// The stabilized version of this intrinsic is
/// [`f64::sqrt`](../../std/primitive.f64.html#method.sqrt)
+ #[rustc_nounwind]
pub fn sqrtf64(x: f64) -> f64;
/// Raises an `f32` to an integer power.
///
/// The stabilized version of this intrinsic is
/// [`f32::powi`](../../std/primitive.f32.html#method.powi)
+ #[rustc_nounwind]
pub fn powif32(a: f32, x: i32) -> f32;
/// Raises an `f64` to an integer power.
///
/// The stabilized version of this intrinsic is
/// [`f64::powi`](../../std/primitive.f64.html#method.powi)
+ #[rustc_nounwind]
pub fn powif64(a: f64, x: i32) -> f64;
/// Returns the sine of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::sin`](../../std/primitive.f32.html#method.sin)
+ #[rustc_nounwind]
pub fn sinf32(x: f32) -> f32;
/// Returns the sine of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::sin`](../../std/primitive.f64.html#method.sin)
+ #[rustc_nounwind]
pub fn sinf64(x: f64) -> f64;
/// Returns the cosine of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::cos`](../../std/primitive.f32.html#method.cos)
+ #[rustc_nounwind]
pub fn cosf32(x: f32) -> f32;
/// Returns the cosine of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::cos`](../../std/primitive.f64.html#method.cos)
+ #[rustc_nounwind]
pub fn cosf64(x: f64) -> f64;
/// Raises an `f32` to an `f32` power.
///
/// The stabilized version of this intrinsic is
/// [`f32::powf`](../../std/primitive.f32.html#method.powf)
+ #[rustc_nounwind]
pub fn powf32(a: f32, x: f32) -> f32;
/// Raises an `f64` to an `f64` power.
///
/// The stabilized version of this intrinsic is
/// [`f64::powf`](../../std/primitive.f64.html#method.powf)
+ #[rustc_nounwind]
pub fn powf64(a: f64, x: f64) -> f64;
/// Returns the exponential of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::exp`](../../std/primitive.f32.html#method.exp)
+ #[rustc_nounwind]
pub fn expf32(x: f32) -> f32;
/// Returns the exponential of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::exp`](../../std/primitive.f64.html#method.exp)
+ #[rustc_nounwind]
pub fn expf64(x: f64) -> f64;
/// Returns 2 raised to the power of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::exp2`](../../std/primitive.f32.html#method.exp2)
+ #[rustc_nounwind]
pub fn exp2f32(x: f32) -> f32;
/// Returns 2 raised to the power of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::exp2`](../../std/primitive.f64.html#method.exp2)
+ #[rustc_nounwind]
pub fn exp2f64(x: f64) -> f64;
/// Returns the natural logarithm of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::ln`](../../std/primitive.f32.html#method.ln)
+ #[rustc_nounwind]
pub fn logf32(x: f32) -> f32;
/// Returns the natural logarithm of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::ln`](../../std/primitive.f64.html#method.ln)
+ #[rustc_nounwind]
pub fn logf64(x: f64) -> f64;
/// Returns the base 10 logarithm of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::log10`](../../std/primitive.f32.html#method.log10)
+ #[rustc_nounwind]
pub fn log10f32(x: f32) -> f32;
/// Returns the base 10 logarithm of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::log10`](../../std/primitive.f64.html#method.log10)
+ #[rustc_nounwind]
pub fn log10f64(x: f64) -> f64;
/// Returns the base 2 logarithm of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::log2`](../../std/primitive.f32.html#method.log2)
+ #[rustc_nounwind]
pub fn log2f32(x: f32) -> f32;
/// Returns the base 2 logarithm of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::log2`](../../std/primitive.f64.html#method.log2)
+ #[rustc_nounwind]
pub fn log2f64(x: f64) -> f64;
/// Returns `a * b + c` for `f32` values.
///
/// The stabilized version of this intrinsic is
/// [`f32::mul_add`](../../std/primitive.f32.html#method.mul_add)
+ #[rustc_nounwind]
pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
/// Returns `a * b + c` for `f64` values.
///
/// The stabilized version of this intrinsic is
/// [`f64::mul_add`](../../std/primitive.f64.html#method.mul_add)
+ #[rustc_nounwind]
pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
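The float math intrinsics above all stabilize as inherent `f32`/`f64` methods; a small sketch using exactly representable values:

```rust
fn main() {
    assert_eq!(9.0f64.sqrt(), 3.0);
    assert_eq!(2.0f32.powi(10), 1024.0);
    assert_eq!((-1.5f32).abs(), 1.5);
    // mul_add computes a * b + c with a single rounding (fused multiply-add).
    assert_eq!(2.5f64.mul_add(2.0, 1.0), 6.0);
}
```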
/// Returns the absolute value of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::abs`](../../std/primitive.f32.html#method.abs)
+ #[rustc_nounwind]
pub fn fabsf32(x: f32) -> f32;
/// Returns the absolute value of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::abs`](../../std/primitive.f64.html#method.abs)
+ #[rustc_nounwind]
pub fn fabsf64(x: f64) -> f64;
/// Returns the minimum of two `f32` values.
@@ -1504,6 +1663,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is
/// [`f32::min`]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn minnumf32(x: f32, y: f32) -> f32;
/// Returns the minimum of two `f64` values.
///
@@ -1515,6 +1675,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is
/// [`f64::min`]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn minnumf64(x: f64, y: f64) -> f64;
/// Returns the maximum of two `f32` values.
///
@@ -1526,6 +1687,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is
/// [`f32::max`]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn maxnumf32(x: f32, y: f32) -> f32;
/// Returns the maximum of two `f64` values.
///
@@ -1537,113 +1699,155 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is
/// [`f64::max`]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn maxnumf64(x: f64, y: f64) -> f64;
/// Copies the sign from `y` to `x` for `f32` values.
///
/// The stabilized version of this intrinsic is
/// [`f32::copysign`](../../std/primitive.f32.html#method.copysign)
+ #[rustc_nounwind]
pub fn copysignf32(x: f32, y: f32) -> f32;
/// Copies the sign from `y` to `x` for `f64` values.
///
/// The stabilized version of this intrinsic is
/// [`f64::copysign`](../../std/primitive.f64.html#method.copysign)
+ #[rustc_nounwind]
pub fn copysignf64(x: f64, y: f64) -> f64;
/// Returns the largest integer less than or equal to an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::floor`](../../std/primitive.f32.html#method.floor)
+ #[rustc_nounwind]
pub fn floorf32(x: f32) -> f32;
/// Returns the largest integer less than or equal to an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::floor`](../../std/primitive.f64.html#method.floor)
+ #[rustc_nounwind]
pub fn floorf64(x: f64) -> f64;
/// Returns the smallest integer greater than or equal to an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::ceil`](../../std/primitive.f32.html#method.ceil)
+ #[rustc_nounwind]
pub fn ceilf32(x: f32) -> f32;
/// Returns the smallest integer greater than or equal to an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::ceil`](../../std/primitive.f64.html#method.ceil)
+ #[rustc_nounwind]
pub fn ceilf64(x: f64) -> f64;
/// Returns the integer part of an `f32`.
///
/// The stabilized version of this intrinsic is
/// [`f32::trunc`](../../std/primitive.f32.html#method.trunc)
+ #[rustc_nounwind]
pub fn truncf32(x: f32) -> f32;
/// Returns the integer part of an `f64`.
///
/// The stabilized version of this intrinsic is
/// [`f64::trunc`](../../std/primitive.f64.html#method.trunc)
+ #[rustc_nounwind]
pub fn truncf64(x: f64) -> f64;
/// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
/// if the argument is not an integer.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::round_ties_even`](../../std/primitive.f32.html#method.round_ties_even)
+ #[rustc_nounwind]
pub fn rintf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
/// if the argument is not an integer.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::round_ties_even`](../../std/primitive.f64.html#method.round_ties_even)
+ #[rustc_nounwind]
pub fn rintf64(x: f64) -> f64;
/// Returns the nearest integer to an `f32`.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn nearbyintf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn nearbyintf64(x: f64) -> f64;
/// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
///
/// The stabilized version of this intrinsic is
/// [`f32::round`](../../std/primitive.f32.html#method.round)
+ #[rustc_nounwind]
pub fn roundf32(x: f32) -> f32;
/// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
///
/// The stabilized version of this intrinsic is
/// [`f64::round`](../../std/primitive.f64.html#method.round)
+ #[rustc_nounwind]
pub fn roundf64(x: f64) -> f64;
+ /// Returns the nearest integer to an `f32`. Rounds half-way cases to the number
+ /// with an even least significant digit.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[cfg(not(bootstrap))]
+ #[rustc_nounwind]
+ pub fn roundevenf32(x: f32) -> f32;
+ /// Returns the nearest integer to an `f64`. Rounds half-way cases to the number
+ /// with an even least significant digit.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[cfg(not(bootstrap))]
+ #[rustc_nounwind]
+ pub fn roundevenf64(x: f64) -> f64;
+
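To make the rounding-mode differences above concrete, a sketch using the stabilized methods; note `round_ties_even`, the name the new `rintf32`/`rintf64` docs point at, may still require a recent toolchain:

```rust
fn main() {
    assert_eq!(3.7f64.floor(), 3.0);
    assert_eq!(3.2f64.ceil(), 4.0);
    assert_eq!((-3.7f64).trunc(), -3.0);
    // `round` breaks ties away from zero; `round_ties_even` breaks them
    // toward the even integer, matching rintf64 in the default FP mode.
    assert_eq!(2.5f64.round(), 3.0);
    assert_eq!(2.5f64.round_ties_even(), 2.0);
}
```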
/// Float addition that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn fadd_fast<T: Copy>(a: T, b: T) -> T;
/// Float subtraction that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn fsub_fast<T: Copy>(a: T, b: T) -> T;
/// Float multiplication that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn fmul_fast<T: Copy>(a: T, b: T) -> T;
/// Float division that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn fdiv_fast<T: Copy>(a: T, b: T) -> T;
/// Float remainder that allows optimizations based on algebraic rules.
/// May assume inputs are finite.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_nounwind]
pub fn frem_fast<T: Copy>(a: T, b: T) -> T;
 /// Converts with LLVM’s fptoui/fptosi, which may return undef for values out of range
/// (<https://github.com/rust-lang/rust/issues/10184>)
///
/// Stabilized as [`f32::to_int_unchecked`] and [`f64::to_int_unchecked`].
+ #[rustc_nounwind]
pub fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> Int;
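A sketch of the stabilized front, `to_int_unchecked`, alongside the saturating `as` cast:

```rust
fn main() {
    let x = 3.9f32;
    // SAFETY: 3.9 is finite and truncates to a value within i32's range.
    let n: i32 = unsafe { x.to_int_unchecked() };
    assert_eq!(n, 3);
    // The safe `as` cast saturates out-of-range values instead.
    assert_eq!(f32::MAX as i32, i32::MAX);
}
```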
/// Returns the number of bits set in an integer type `T`
@@ -1658,6 +1862,7 @@ extern "rust-intrinsic" {
/// [`u32::count_ones`]
#[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn ctpop<T: Copy>(x: T) -> T;
/// Returns the number of leading unset bits (zeroes) in an integer type `T`.
@@ -1696,6 +1901,7 @@ extern "rust-intrinsic" {
/// ```
#[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn ctlz<T: Copy>(x: T) -> T;
/// Like `ctlz`, but extra-unsafe as it returns `undef` when
@@ -1715,6 +1921,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_leading, 3);
/// ```
#[rustc_const_stable(feature = "constctlz", since = "1.50.0")]
+ #[rustc_nounwind]
pub fn ctlz_nonzero<T: Copy>(x: T) -> T;
/// Returns the number of trailing unset bits (zeroes) in an integer type `T`.
@@ -1753,6 +1960,7 @@ extern "rust-intrinsic" {
/// ```
#[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn cttz<T: Copy>(x: T) -> T;
/// Like `cttz`, but extra-unsafe as it returns `undef` when
@@ -1772,6 +1980,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_trailing, 3);
/// ```
#[rustc_const_stable(feature = "const_cttz_nonzero", since = "1.53.0")]
+ #[rustc_nounwind]
pub fn cttz_nonzero<T: Copy>(x: T) -> T;
/// Reverses the bytes in an integer type `T`.
@@ -1786,6 +1995,7 @@ extern "rust-intrinsic" {
/// [`u32::swap_bytes`]
#[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn bswap<T: Copy>(x: T) -> T;
/// Reverses the bits in an integer type `T`.
@@ -1800,6 +2010,7 @@ extern "rust-intrinsic" {
/// [`u32::reverse_bits`]
#[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn bitreverse<T: Copy>(x: T) -> T;
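The byte/bit reversal intrinsics stabilize as `swap_bytes` and `reverse_bits`; a quick sketch:

```rust
fn main() {
    // swap_bytes reverses byte order (endianness conversion) ...
    assert_eq!(0x1234_5678u32.swap_bytes(), 0x7856_3412);
    // ... while reverse_bits reverses the order of individual bits.
    assert_eq!(0b1000_0000u8.reverse_bits(), 0b0000_0001);
}
```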
/// Performs checked integer addition.
@@ -1814,6 +2025,7 @@ extern "rust-intrinsic" {
/// [`u32::overflowing_add`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
 /// Performs checked integer subtraction.
@@ -1828,6 +2040,7 @@ extern "rust-intrinsic" {
/// [`u32::overflowing_sub`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
 /// Performs checked integer multiplication.
@@ -1842,6 +2055,7 @@ extern "rust-intrinsic" {
/// [`u32::overflowing_mul`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
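For reference, the stabilized `overflowing_*` methods return the wrapped result plus an overflow flag, mirroring the `(T, bool)` shape of the intrinsics above:

```rust
fn main() {
    assert_eq!(u8::MAX.overflowing_add(1), (0, true));
    assert_eq!(100u8.overflowing_sub(1), (99, false));
    // 16 * 32 = 512 wraps to 512 - 256 = 0 in u8, with overflow reported.
    assert_eq!(16u8.overflowing_mul(32), (0, true));
}
```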
/// Performs an exact division, resulting in undefined behavior where
@@ -1849,6 +2063,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_exact_div", issue = "none")]
+ #[rustc_nounwind]
pub fn exact_div<T: Copy>(x: T, y: T) -> T;
/// Performs an unchecked division, resulting in undefined behavior
@@ -1858,6 +2073,7 @@ extern "rust-intrinsic" {
/// primitives via the `checked_div` method. For example,
/// [`u32::checked_div`]
#[rustc_const_stable(feature = "const_int_unchecked_div", since = "1.52.0")]
+ #[rustc_nounwind]
pub fn unchecked_div<T: Copy>(x: T, y: T) -> T;
/// Returns the remainder of an unchecked division, resulting in
 /// undefined behavior when `y == 0` or `x == T::MIN && y == -1`.
@@ -1866,6 +2082,7 @@ extern "rust-intrinsic" {
/// primitives via the `checked_rem` method. For example,
/// [`u32::checked_rem`]
#[rustc_const_stable(feature = "const_int_unchecked_rem", since = "1.52.0")]
+ #[rustc_nounwind]
pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T;
/// Performs an unchecked left shift, resulting in undefined behavior when
@@ -1875,6 +2092,7 @@ extern "rust-intrinsic" {
/// primitives via the `checked_shl` method. For example,
/// [`u32::checked_shl`]
#[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
+ #[rustc_nounwind]
pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T;
/// Performs an unchecked right shift, resulting in undefined behavior when
/// `y < 0` or `y >= N`, where N is the width of T in bits.
@@ -1883,6 +2101,7 @@ extern "rust-intrinsic" {
/// primitives via the `checked_shr` method. For example,
/// [`u32::checked_shr`]
#[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
+ #[rustc_nounwind]
pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T;
/// Returns the result of an unchecked addition, resulting in
@@ -1890,6 +2109,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ #[rustc_nounwind]
pub fn unchecked_add<T: Copy>(x: T, y: T) -> T;
/// Returns the result of an unchecked subtraction, resulting in
@@ -1897,6 +2117,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ #[rustc_nounwind]
pub fn unchecked_sub<T: Copy>(x: T, y: T) -> T;
/// Returns the result of an unchecked multiplication, resulting in
@@ -1904,6 +2125,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ #[rustc_nounwind]
pub fn unchecked_mul<T: Copy>(x: T, y: T) -> T;
/// Performs rotate left.
@@ -1918,6 +2140,7 @@ extern "rust-intrinsic" {
/// [`u32::rotate_left`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
/// Performs rotate right.
@@ -1932,6 +2155,7 @@ extern "rust-intrinsic" {
/// [`u32::rotate_right`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
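A sketch of the stabilized rotations, which wrap shifted-out bits around instead of discarding them:

```rust
fn main() {
    // The high bit wraps around to the low end, and vice versa.
    assert_eq!(0b1000_0001u8.rotate_left(1), 0b0000_0011);
    assert_eq!(0b1000_0001u8.rotate_right(1), 0b1100_0000);
}
```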
/// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
@@ -1946,6 +2170,7 @@ extern "rust-intrinsic" {
/// [`u32::wrapping_add`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
/// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -1959,6 +2184,7 @@ extern "rust-intrinsic" {
/// [`u32::wrapping_sub`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
/// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -1972,6 +2198,7 @@ extern "rust-intrinsic" {
/// [`u32::wrapping_mul`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
/// Computes `a + b`, saturating at numeric bounds.
@@ -1986,6 +2213,7 @@ extern "rust-intrinsic" {
/// [`u32::saturating_add`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
/// Computes `a - b`, saturating at numeric bounds.
///
@@ -1999,8 +2227,20 @@ extern "rust-intrinsic" {
/// [`u32::saturating_sub`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
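And the stabilized saturating arithmetic, which clamps at the numeric bounds rather than wrapping or panicking:

```rust
fn main() {
    assert_eq!(u8::MAX.saturating_add(10), u8::MAX);
    assert_eq!(0u8.saturating_sub(10), 0);
    assert_eq!(i8::MIN.saturating_sub(1), i8::MIN);
}
```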
+ /// This is an implementation detail of [`crate::ptr::read`] and should
+ /// not be used anywhere else. See its comments for why this exists.
+ ///
+ /// This intrinsic can *only* be called where the argument is a local without
+ /// projections (`read_via_copy(p)`, not `read_via_copy(*p)`) so that it
+ /// trivially obeys runtime-MIR rules about derefs in operands.
+ #[cfg(not(bootstrap))]
+ #[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
+ #[rustc_nounwind]
+ pub fn read_via_copy<T>(p: *const T) -> T;
+
 /// Returns the value of the discriminant for the variant in `v`;
/// if `T` has no discriminant, returns `0`.
///
@@ -2012,6 +2252,7 @@ extern "rust-intrinsic" {
/// The stabilized version of this intrinsic is [`core::mem::discriminant`].
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
/// Returns the number of variants of the type `T` cast to a `usize`;
@@ -2025,6 +2266,7 @@ extern "rust-intrinsic" {
/// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
#[rustc_const_unstable(feature = "variant_count", issue = "73662")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn variant_count<T>() -> usize;
/// Rust's "try catch" construct which invokes the function pointer `try_fn`
@@ -2034,18 +2276,24 @@ extern "rust-intrinsic" {
/// takes the data pointer and a pointer to the target-specific exception
/// object that was caught. For more information see the compiler's
/// source as well as std's catch implementation.
+ ///
+ /// `catch_fn` must not unwind.
+ #[rustc_nounwind]
pub fn r#try(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32;
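The user-facing construct built on `try` is `std::panic::catch_unwind`; a minimal sketch (it only catches unwinding panics, not aborts):

```rust
fn main() {
    let caught = std::panic::catch_unwind(|| panic!("boom"));
    assert!(caught.is_err());
    let ok = std::panic::catch_unwind(|| 42);
    assert_eq!(ok.unwrap(), 42);
}
```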
/// Emits a `!nontemporal` store according to LLVM (see their docs).
/// Probably will never become stable.
+ #[rustc_nounwind]
pub fn nontemporal_store<T>(ptr: *mut T, val: T);
/// See documentation of `<*const T>::offset_from` for details.
#[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
+ #[rustc_nounwind]
pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;
/// See documentation of `<*const T>::sub_ptr` for details.
#[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
+ #[rustc_nounwind]
pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize;
/// See documentation of `<*const T>::guaranteed_eq` for details.
@@ -2059,6 +2307,7 @@ extern "rust-intrinsic" {
/// any safety invariants.
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;
/// Allocates a block of memory at compile time.
@@ -2070,6 +2319,7 @@ extern "rust-intrinsic" {
/// - At compile time, a compile error occurs if this constraint is violated.
/// - At runtime, it is not checked.
#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+ #[rustc_nounwind]
pub fn const_allocate(size: usize, align: usize) -> *mut u8;
 /// Deallocates memory allocated by `intrinsics::const_allocate` at compile time.
@@ -2083,6 +2333,7 @@ extern "rust-intrinsic" {
 /// - If `ptr` was created in another const, this intrinsic doesn't deallocate it.
 /// - If `ptr` points to a local variable, this intrinsic doesn't deallocate it.
#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+ #[rustc_nounwind]
pub fn const_deallocate(ptr: *mut u8, size: usize, align: usize);
/// Determines whether the raw bytes of the two values are equal.
@@ -2107,6 +2358,7 @@ extern "rust-intrinsic" {
/// (The implementation is allowed to branch on the results of comparisons,
/// which is UB if any of their inputs are `undef`.)
#[rustc_const_unstable(feature = "const_intrinsic_raw_eq", issue = "none")]
+ #[rustc_nounwind]
pub fn raw_eq<T>(a: &T, b: &T) -> bool;
/// See documentation of [`std::hint::black_box`] for details.
@@ -2114,14 +2366,17 @@ extern "rust-intrinsic" {
/// [`std::hint::black_box`]: crate::hint::black_box
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
#[rustc_safe_intrinsic]
+ #[rustc_nounwind]
pub fn black_box<T>(dummy: T) -> T;
/// `ptr` must point to a vtable.
/// The intrinsic will return the size stored in that vtable.
+ #[rustc_nounwind]
pub fn vtable_size(ptr: *const ()) -> usize;
/// `ptr` must point to a vtable.
/// The intrinsic will return the alignment stored in that vtable.
+ #[rustc_nounwind]
pub fn vtable_align(ptr: *const ()) -> usize;
/// Selects which function to call depending on the context.
@@ -2185,6 +2440,13 @@ extern "rust-intrinsic" {
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
+
+ /// This method creates a pointer to any `Some` value. If the argument is
+ /// `None`, an invalid within-bounds pointer (that is still acceptable for
+ /// constructing an empty slice) is returned.
+ #[cfg(not(bootstrap))]
+ #[rustc_nounwind]
+ pub fn option_payload_ptr<T>(arg: *const Option<T>) -> *const T;
}
// Some functions are defined here because they accidentally got made
@@ -2357,6 +2619,7 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
extern "rust-intrinsic" {
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[rustc_nounwind]
pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
}
@@ -2447,6 +2710,7 @@ pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: us
pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
extern "rust-intrinsic" {
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[rustc_nounwind]
fn copy<T>(src: *const T, dst: *mut T, count: usize);
}
@@ -2519,6 +2783,7 @@ pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
extern "rust-intrinsic" {
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
+ #[rustc_nounwind]
fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
}
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index 72db1d87c..45498a54b 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -8,7 +8,7 @@
//!
//! The documentation for this module describes how to use this feature. If you are interested in
//! hacking on the implementation, most of that documentation lives at
-//! `rustc_mir_building/src/build/custom/mod.rs`.
+//! `rustc_mir_build/src/build/custom/mod.rs`.
//!
//! Typical usage will look like this:
//!
@@ -49,6 +49,8 @@
//!
//! The input to the [`mir!`] macro is:
//!
+//! - An optional return type annotation in the form of `type RET = ...;`. This may be required
+//! if the compiler cannot infer the type of RET.
//! - A possibly empty list of local declarations. Locals can also be declared inline on
//! assignments via `let`. Type inference generally works. Shadowing does not.
//! - A list of basic blocks. The first of these is the start block and is where execution begins.
@@ -124,6 +126,18 @@
//! }
//! )
//! }
+//!
+//! #[custom_mir(dialect = "runtime", phase = "optimized")]
+//! fn annotated_return_type() -> (i32, bool) {
+//! mir!(
+//! type RET = (i32, bool);
+//! {
+//! RET.0 = 1;
+//! RET.1 = true;
+//! Return()
+//! }
+//! )
+//! }
//! ```
//!
//! We can also set off compilation failures that happen in sufficiently late stages of the
@@ -218,6 +232,7 @@
//! - `&`, `&mut`, `addr_of!`, and `addr_of_mut!` all work to create their associated rvalue.
//! - [`Discriminant`] and [`Len`] have associated functions.
//! - Unary and binary operations use their normal Rust syntax - `a * b`, `!c`, etc.
+//! - The binary operation `Offset` can be created via [`Offset`].
//! - Checked binary operations are represented by wrapping the associated binop in [`Checked`].
//! - Array repetition syntax (`[foo; 10]`) creates the associated rvalue.
//!
@@ -227,12 +242,12 @@
//! are no resume and abort terminators, and terminators that might unwind do not have any way to
//! indicate the unwind block.
//!
-//! - [`Goto`], [`Return`], [`Unreachable`], [`Drop`](Drop()), and [`DropAndReplace`] have associated functions.
+//! - [`Goto`], [`Return`], [`Unreachable`], and [`Drop`](Drop()) have associated functions.
//! - `match some_int_operand` becomes a `SwitchInt`. Each arm should be `literal => basic_block`
//! - The exception is the last arm, which must be `_ => basic_block` and corresponds to the
//! otherwise branch.
//! - [`Call`] has an associated function as well. The third argument of this function is a normal
-//! function call expresion, for example `my_other_function(a, 5)`.
+//! function call expression, for example `my_other_function(a, 5)`.
//!
#![unstable(
@@ -259,7 +274,6 @@ define!("mir_return", fn Return() -> BasicBlock);
define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
define!("mir_unreachable", fn Unreachable() -> BasicBlock);
define!("mir_drop", fn Drop<T>(place: T, goto: BasicBlock));
-define!("mir_drop_and_replace", fn DropAndReplace<T>(place: T, value: T, goto: BasicBlock));
define!("mir_call", fn Call<T>(place: T, goto: BasicBlock, call: T));
define!("mir_storage_live", fn StorageLive<T>(local: T));
define!("mir_storage_dead", fn StorageDead<T>(local: T));
@@ -276,6 +290,7 @@ define!(
fn Discriminant<T>(place: T) -> <T as ::core::marker::DiscriminantKind>::Discriminant
);
define!("mir_set_discriminant", fn SetDiscriminant<T>(place: T, index: u32));
+define!("mir_offset", fn Offset<T, U>(ptr: T, count: U) -> T);
define!(
"mir_field",
/// Access the field with the given index of some place.
@@ -331,6 +346,14 @@ define!(
fn Variant<T>(place: T, index: u32) -> ()
);
define!(
+ "mir_cast_transmute",
+ /// Emits a `CastKind::Transmute` cast.
+ ///
+ /// Needed to test the UB when `sizeof(T) != sizeof(U)`, which can't be
+ /// generated via the normal `mem::transmute`.
+ fn CastTransmute<T, U>(operand: T) -> U
+);
+define!(
"mir_make_place",
#[doc(hidden)]
fn __internal_make_place<T>(place: T) -> *mut T
@@ -343,6 +366,7 @@ define!(
#[rustc_macro_transparency = "transparent"]
pub macro mir {
(
+ $(type RET = $ret_ty:ty ;)?
$(let $local_decl:ident $(: $local_decl_ty:ty)? ;)*
{
@@ -363,7 +387,7 @@ pub macro mir {
{
// Now all locals
#[allow(non_snake_case)]
- let RET;
+ let RET $(: $ret_ty)?;
$(
let $local_decl $(: $local_decl_ty)? ;
)*
diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs
index 477e7117c..4e0e19ddc 100644
--- a/library/core/src/iter/adapters/by_ref_sized.rs
+++ b/library/core/src/iter/adapters/by_ref_sized.rs
@@ -1,3 +1,4 @@
+use crate::num::NonZeroUsize;
use crate::ops::{NeverShortCircuit, Try};
/// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics.
@@ -26,7 +27,7 @@ impl<I: Iterator> Iterator for ByRefSized<'_, I> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
I::advance_by(self.0, n)
}
@@ -62,7 +63,7 @@ impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
I::advance_back_by(self.0, n)
}
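The hunks in this file and in the adapters below migrate `advance_by`/`advance_back_by` from `Result<(), usize>` (where `Err(k)` meant k elements *were* advanced) to `Result<(), NonZeroUsize>` (where `Err(k)` means k steps *remained*, which can never be zero). A sketch of the new contract, assuming a nightly toolchain with `iter_advance_by` and this change applied:

```rust
#![feature(iter_advance_by)]
use std::num::NonZeroUsize;

fn main() {
    let mut it = [1, 2, 3].into_iter();
    assert_eq!(it.advance_by(2), Ok(()));
    assert_eq!(it.next(), Some(3));
    // The iterator is now empty, so all 5 requested steps remain.
    assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(5).unwrap()));
}
```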
diff --git a/library/core/src/iter/adapters/chain.rs b/library/core/src/iter/adapters/chain.rs
index 60eb3a6da..75727c3a2 100644
--- a/library/core/src/iter/adapters/chain.rs
+++ b/library/core/src/iter/adapters/chain.rs
@@ -1,4 +1,5 @@
use crate::iter::{DoubleEndedIterator, FusedIterator, Iterator, TrustedLen};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that links two iterators together, in a chain.
@@ -95,38 +96,33 @@ where
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let mut rem = n;
-
+ fn advance_by(&mut self, mut n: usize) -> Result<(), NonZeroUsize> {
if let Some(ref mut a) = self.a {
- match a.advance_by(rem) {
+ n = match a.advance_by(n) {
Ok(()) => return Ok(()),
- Err(k) => rem -= k,
- }
+ Err(k) => k.get(),
+ };
self.a = None;
}
if let Some(ref mut b) = self.b {
- match b.advance_by(rem) {
- Ok(()) => return Ok(()),
- Err(k) => rem -= k,
- }
+ return b.advance_by(n);
// we don't fuse the second iterator
}
- if rem == 0 { Ok(()) } else { Err(n - rem) }
+ NonZeroUsize::new(n).map_or(Ok(()), Err)
}
#[inline]
fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
if let Some(ref mut a) = self.a {
- match a.advance_by(n) {
+ n = match a.advance_by(n) {
Ok(()) => match a.next() {
- None => n = 0,
+ None => 0,
x => return x,
},
- Err(k) => n -= k,
- }
+ Err(k) => k.get(),
+ };
self.a = None;
}
@@ -186,38 +182,33 @@ where
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let mut rem = n;
-
+ fn advance_back_by(&mut self, mut n: usize) -> Result<(), NonZeroUsize> {
if let Some(ref mut b) = self.b {
- match b.advance_back_by(rem) {
+ n = match b.advance_back_by(n) {
Ok(()) => return Ok(()),
- Err(k) => rem -= k,
- }
+ Err(k) => k.get(),
+ };
self.b = None;
}
if let Some(ref mut a) = self.a {
- match a.advance_back_by(rem) {
- Ok(()) => return Ok(()),
- Err(k) => rem -= k,
- }
+ return a.advance_back_by(n);
// we don't fuse the second iterator
}
- if rem == 0 { Ok(()) } else { Err(n - rem) }
+ NonZeroUsize::new(n).map_or(Ok(()), Err)
}
#[inline]
fn nth_back(&mut self, mut n: usize) -> Option<Self::Item> {
if let Some(ref mut b) = self.b {
- match b.advance_back_by(n) {
+ n = match b.advance_back_by(n) {
Ok(()) => match b.next_back() {
- None => n = 0,
+ None => 0,
x => return x,
},
- Err(k) => n -= k,
- }
+ Err(k) => k.get(),
+ };
self.b = None;
}
@@ -282,6 +273,28 @@ where
{
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<A: Default, B: Default> Default for Chain<A, B> {
+ /// Creates a `Chain` from the default values for `A` and `B`.
+ ///
+ /// ```
+ /// # use core::iter::Chain;
+ /// # use core::slice;
+ /// # use std::collections::{btree_set, BTreeSet};
+ /// # use std::mem;
+ /// struct Foo<'a>(Chain<slice::Iter<'a, u8>, btree_set::Iter<'a, u8>>);
+ ///
+ /// let set = BTreeSet::<u8>::new();
+ /// let slice: &[u8] = &[];
+ /// let mut foo = Foo(slice.iter().chain(set.iter()));
+ ///
+ /// // take requires `Default`
+ /// let _: Chain<_, _> = mem::take(&mut foo.0);
+ /// ```
+ fn default() -> Self {
+ Chain::new(Default::default(), Default::default())
+ }
+}
+
#[inline]
fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
let x = f(opt.as_mut()?);
diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs
index 914ff86c1..d3cceb8d4 100644
--- a/library/core/src/iter/adapters/cloned.rs
+++ b/library/core/src/iter/adapters/cloned.rs
@@ -153,3 +153,17 @@ where
item.clone()
}
}
+
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<I: Default> Default for Cloned<I> {
+ /// Creates a `Cloned` iterator from the default value of `I`.
+ ///
+ /// ```
+ /// # use core::slice;
+ /// # use core::iter::Cloned;
+ /// let iter: Cloned<slice::Iter<'_, u8>> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
diff --git a/library/core/src/iter/adapters/copied.rs b/library/core/src/iter/adapters/copied.rs
index 62d3afb81..8f6b2904e 100644
--- a/library/core/src/iter/adapters/copied.rs
+++ b/library/core/src/iter/adapters/copied.rs
@@ -4,6 +4,7 @@ use crate::iter::adapters::{
use crate::iter::{FusedIterator, TrustedLen};
use crate::mem::MaybeUninit;
use crate::mem::SizedTypeProperties;
+use crate::num::NonZeroUsize;
use crate::ops::Try;
use crate::{array, ptr};
@@ -89,7 +90,7 @@ where
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.it.advance_by(n)
}
@@ -130,7 +131,7 @@ where
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.it.advance_back_by(n)
}
}
@@ -240,3 +241,17 @@ where
}
}
}
+
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<I: Default> Default for Copied<I> {
+ /// Creates a `Copied` iterator from the default value of `I`.
+ ///
+ /// ```
+ /// # use core::slice;
+ /// # use core::iter::Copied;
+ /// let iter: Copied<slice::Iter<'_, u8>> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
diff --git a/library/core/src/iter/adapters/cycle.rs b/library/core/src/iter/adapters/cycle.rs
index 02b593907..51bd09b6e 100644
--- a/library/core/src/iter/adapters/cycle.rs
+++ b/library/core/src/iter/adapters/cycle.rs
@@ -1,3 +1,4 @@
+use crate::num::NonZeroUsize;
use crate::{iter::FusedIterator, ops::Try};
/// An iterator that repeats endlessly.
@@ -81,23 +82,22 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let mut rem = n;
- match self.iter.advance_by(rem) {
- ret @ Ok(_) => return ret,
- Err(advanced) => rem -= advanced,
- }
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let mut n = match self.iter.advance_by(n) {
+ Ok(()) => return Ok(()),
+ Err(rem) => rem.get(),
+ };
- while rem > 0 {
+ while n > 0 {
self.iter = self.orig.clone();
- match self.iter.advance_by(rem) {
- ret @ Ok(_) => return ret,
- Err(0) => return Err(n - rem),
- Err(advanced) => rem -= advanced,
- }
+ n = match self.iter.advance_by(n) {
+ Ok(()) => return Ok(()),
+ e @ Err(rem) if rem.get() == n => return e,
+ Err(rem) => rem.get(),
+ };
}
- Ok(())
+ NonZeroUsize::new(n).map_or(Ok(()), Err)
}
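Under the new contract, `Cycle::advance_by` only reports an error when the underlying iterator turns out to be empty; otherwise it keeps restarting from the cloned original until the request is satisfied. A sketch (nightly, `iter_advance_by`):

```rust
#![feature(iter_advance_by)]

fn main() {
    let mut it = [1, 2, 3].iter().cycle();
    // 7 steps wrap around the 3-element source twice (3 + 3 + 1).
    assert_eq!(it.advance_by(7), Ok(()));
    assert_eq!(it.next(), Some(&2));
}
```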
// No `fold` override, because `fold` doesn't make much sense for `Cycle`,
diff --git a/library/core/src/iter/adapters/enumerate.rs b/library/core/src/iter/adapters/enumerate.rs
index 14a126951..00c1c377b 100644
--- a/library/core/src/iter/adapters/enumerate.rs
+++ b/library/core/src/iter/adapters/enumerate.rs
@@ -2,6 +2,7 @@ use crate::iter::adapters::{
zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce,
};
use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// An iterator that yields the current count and the element during iteration.
@@ -114,17 +115,14 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- match self.iter.advance_by(n) {
- ret @ Ok(_) => {
- self.count += n;
- ret
- }
- ret @ Err(advanced) => {
- self.count += advanced;
- ret
- }
- }
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let remaining = self.iter.advance_by(n);
+ let advanced = match remaining {
+ Ok(()) => n,
+ Err(rem) => n - rem.get(),
+ };
+ self.count += advanced;
+ remaining
}
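The rewritten `Enumerate::advance_by` bumps the front counter by however many items were actually consumed, whether or not the advance completed. A sketch (nightly, `iter_advance_by`):

```rust
#![feature(iter_advance_by)]

fn main() {
    let mut it = ["a", "b", "c"].into_iter().enumerate();
    assert_eq!(it.advance_by(2), Ok(()));
    // Two items were consumed, so the next index is 2.
    assert_eq!(it.next(), Some((2, "c")));
}
```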
#[rustc_inherit_overflow_checks]
@@ -208,7 +206,7 @@ where
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
// we do not need to update the count since that only tallies the number of items
// consumed from the front. consuming items from the back can never reduce that.
self.iter.advance_back_by(n)
@@ -264,3 +262,17 @@ where
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {}
+
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<I: Default> Default for Enumerate<I> {
+ /// Creates an `Enumerate` iterator from the default value of `I`.
+ ///
+ /// ```
+ /// # use core::slice;
+ /// # use std::iter::Enumerate;
+ /// let iter: Enumerate<slice::Iter<'_, u8>> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Enumerate::new(Default::default())
+ }
+}
diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs
index b040a0ea9..2fd8a5c1d 100644
--- a/library/core/src/iter/adapters/flatten.rs
+++ b/library/core/src/iter/adapters/flatten.rs
@@ -1,5 +1,6 @@
use crate::fmt;
use crate::iter::{DoubleEndedIterator, Fuse, FusedIterator, Iterator, Map, TrustedLen};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that maps each element to an iterator, and yields the elements
@@ -75,7 +76,7 @@ where
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.inner.advance_by(n)
}
@@ -120,7 +121,7 @@ where
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.inner.advance_back_by(n)
}
}
@@ -236,7 +237,7 @@ where
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.inner.advance_by(n)
}
@@ -281,7 +282,7 @@ where
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.inner.advance_back_by(n)
}
}
@@ -302,6 +303,24 @@ where
{
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<I> Default for Flatten<I>
+where
+ I: Default + Iterator<Item: IntoIterator>,
+{
+ /// Creates a `Flatten` iterator from the default value of `I`.
+ ///
+ /// ```
+ /// # use core::slice;
+ /// # use std::iter::Flatten;
+ /// let iter: Flatten<slice::Iter<'_, [u8; 4]>> = Default::default();
+ /// assert_eq!(iter.count(), 0);
+ /// ```
+ fn default() -> Self {
+ Flatten::new(Default::default())
+ }
+}
+
/// Real logic of both `Flatten` and `FlatMap` which simply delegate to
/// this type.
#[derive(Clone, Debug)]
@@ -534,18 +553,18 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
#[inline]
#[rustc_inherit_overflow_checks]
fn advance<U: Iterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> {
match iter.advance_by(n) {
Ok(()) => ControlFlow::Break(()),
- Err(advanced) => ControlFlow::Continue(n - advanced),
+ Err(remaining) => ControlFlow::Continue(remaining.get()),
}
}
match self.iter_try_fold(n, advance) {
- ControlFlow::Continue(remaining) if remaining > 0 => Err(n - remaining),
+ ControlFlow::Continue(remaining) => NonZeroUsize::new(remaining).map_or(Ok(()), Err),
_ => Ok(()),
}
}
@@ -624,18 +643,18 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
#[inline]
#[rustc_inherit_overflow_checks]
fn advance<U: DoubleEndedIterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> {
match iter.advance_back_by(n) {
Ok(()) => ControlFlow::Break(()),
- Err(advanced) => ControlFlow::Continue(n - advanced),
+ Err(remaining) => ControlFlow::Continue(remaining.get()),
}
}
match self.iter_try_rfold(n, advance) {
- ControlFlow::Continue(remaining) if remaining > 0 => Err(n - remaining),
+ ControlFlow::Continue(remaining) => NonZeroUsize::new(remaining).map_or(Ok(()), Err),
_ => Ok(()),
}
}
diff --git a/library/core/src/iter/adapters/fuse.rs b/library/core/src/iter/adapters/fuse.rs
index c93144542..b1fa4f921 100644
--- a/library/core/src/iter/adapters/fuse.rs
+++ b/library/core/src/iter/adapters/fuse.rs
@@ -181,6 +181,21 @@ where
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<I: Default> Default for Fuse<I> {
+ /// Creates a `Fuse` iterator from the default value of `I`.
+ ///
+ /// ```
+ /// # use core::slice;
+ /// # use std::iter::Fuse;
+ /// let iter: Fuse<slice::Iter<'_, u8>> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Fuse { iter: Default::default() }
+ }
+}
+
#[unstable(feature = "trusted_len", issue = "37572")]
// SAFETY: `TrustedLen` requires that an accurate length is reported via `size_hint()`. As `Fuse`
// is just forwarding this to the wrapped iterator `I` this property is preserved and it is safe to
diff --git a/library/core/src/iter/adapters/rev.rs b/library/core/src/iter/adapters/rev.rs
index 139fb7bbd..4aaf7c61f 100644
--- a/library/core/src/iter/adapters/rev.rs
+++ b/library/core/src/iter/adapters/rev.rs
@@ -1,4 +1,5 @@
use crate::iter::{FusedIterator, TrustedLen};
+use crate::num::NonZeroUsize;
use crate::ops::Try;
/// A double-ended iterator with the direction inverted.
@@ -38,7 +39,7 @@ where
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.iter.advance_back_by(n)
}
@@ -83,7 +84,7 @@ where
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.iter.advance_by(n)
}
@@ -135,3 +136,17 @@ impl<I> FusedIterator for Rev<I> where I: FusedIterator + DoubleEndedIterator {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Rev<I> where I: TrustedLen + DoubleEndedIterator {}
+
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<I: Default> Default for Rev<I> {
+ /// Creates a `Rev` iterator from the default value of `I`.
+ ///
+ /// ```
+ /// # use core::slice;
+ /// # use core::iter::Rev;
+ /// let iter: Rev<slice::Iter<'_, u8>> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Rev::new(Default::default())
+ }
+}
diff --git a/library/core/src/iter/adapters/skip.rs b/library/core/src/iter/adapters/skip.rs
index c6334880d..306338bc7 100644
--- a/library/core/src/iter/adapters/skip.rs
+++ b/library/core/src/iter/adapters/skip.rs
@@ -1,5 +1,6 @@
use crate::intrinsics::unlikely;
use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that skips over `n` elements of `iter`.
@@ -128,34 +129,27 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let mut rem = n;
- let step_one = self.n.saturating_add(rem);
-
- match self.iter.advance_by(step_one) {
- Ok(_) => {
- rem -= step_one - self.n;
- self.n = 0;
- }
- Err(advanced) => {
- let advanced_without_skip = advanced.saturating_sub(self.n);
- self.n = self.n.saturating_sub(advanced);
- return if n == 0 { Ok(()) } else { Err(advanced_without_skip) };
- }
- }
+ fn advance_by(&mut self, mut n: usize) -> Result<(), NonZeroUsize> {
+ let skip_inner = self.n;
+ let skip_and_advance = skip_inner.saturating_add(n);
- // step_one calculation may have saturated
- if unlikely(rem > 0) {
- return match self.iter.advance_by(rem) {
- ret @ Ok(_) => ret,
- Err(advanced) => {
- rem -= advanced;
- Err(n - rem)
- }
- };
+ let remainder = match self.iter.advance_by(skip_and_advance) {
+ Ok(()) => 0,
+ Err(n) => n.get(),
+ };
+ let advanced_inner = skip_and_advance - remainder;
+ n -= advanced_inner.saturating_sub(skip_inner);
+ self.n = self.n.saturating_sub(advanced_inner);
+
+ // skip_and_advance may have saturated
+ if unlikely(remainder == 0 && n > 0) {
+ n = match self.iter.advance_by(n) {
+ Ok(()) => 0,
+ Err(n) => n.get(),
+ }
}
- Ok(())
+ NonZeroUsize::new(n).map_or(Ok(()), Err)
}
}
@@ -209,13 +203,11 @@ where
impl_fold_via_try_fold! { rfold -> try_rfold }
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let min = crate::cmp::min(self.len(), n);
- return match self.iter.advance_back_by(min) {
- ret @ Ok(_) if n <= min => ret,
- Ok(_) => Err(min),
- _ => panic!("ExactSizeIterator contract violation"),
- };
+ let rem = self.iter.advance_back_by(min);
+ assert!(rem.is_ok(), "ExactSizeIterator contract violation");
+ NonZeroUsize::new(n - min).map_or(Ok(()), Err)
}
}
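
A worked model of the arithmetic above, ignoring the saturation second pass (which only triggers when `skip + n` overflows `usize`); `skip_advance` is a hypothetical stand-in for `Skip::advance_by` over a source of length `len`:

    use std::num::NonZeroUsize;

    fn skip_advance(len: usize, skip: usize, n: usize) -> Result<(), NonZeroUsize> {
        let skip_and_advance = skip.saturating_add(n);
        let advanced_inner = skip_and_advance.min(len); // what the source can give
        // Steps of `n` actually served, after the skipped prefix is paid for.
        let served = advanced_inner.saturating_sub(skip);
        NonZeroUsize::new(n - served).map_or(Ok(()), Err)
    }

    fn main() {
        assert_eq!(skip_advance(10, 3, 5), Ok(()));                             // 3 + 5 <= 10
        assert_eq!(skip_advance(10, 3, 9), Err(NonZeroUsize::new(2).unwrap())); // 2 short
        assert_eq!(skip_advance(2, 3, 1), Err(NonZeroUsize::new(1).unwrap()));  // all skipped
    }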
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index d947c7b0e..ce18bffe7 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -1,5 +1,6 @@
use crate::cmp;
use crate::iter::{adapters::SourceIter, FusedIterator, InPlaceIterable, TrustedLen};
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator that only iterates over the first `n` iterations of `iter`.
@@ -121,18 +122,15 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let min = self.n.min(n);
- match self.iter.advance_by(min) {
- Ok(_) => {
- self.n -= min;
- if min < n { Err(min) } else { Ok(()) }
- }
- ret @ Err(advanced) => {
- self.n -= advanced;
- ret
- }
- }
+ let rem = match self.iter.advance_by(min) {
+ Ok(()) => 0,
+ Err(rem) => rem.get(),
+ };
+ let advanced = min - rem;
+ self.n -= advanced;
+ NonZeroUsize::new(n - advanced).map_or(Ok(()), Err)
}
}
@@ -223,7 +221,7 @@ where
#[inline]
#[rustc_inherit_overflow_checks]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
// The amount by which the inner iterator needs to be shortened for it to be
// at most as long as the take() amount.
let trim_inner = self.iter.len().saturating_sub(self.n);
@@ -232,12 +230,14 @@ where
// about having to advance more than usize::MAX here.
let advance_by = trim_inner.saturating_add(n);
- let advanced = match self.iter.advance_back_by(advance_by) {
- Ok(_) => advance_by - trim_inner,
- Err(advanced) => advanced - trim_inner,
+ let remainder = match self.iter.advance_back_by(advance_by) {
+ Ok(()) => 0,
+ Err(rem) => rem.get(),
};
- self.n -= advanced;
- return if advanced < n { Err(advanced) } else { Ok(()) };
+ let advanced_by_inner = advance_by - remainder;
+ let advanced_by = advanced_by_inner - trim_inner;
+ self.n -= advanced_by;
+ NonZeroUsize::new(n - advanced_by).map_or(Ok(()), Err)
}
}
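
Likewise, a worked model of `Take::advance_back_by`'s `trim_inner` bookkeeping (a hypothetical free function over an exact-size source of length `len` with a take budget of `take`):

    use std::num::NonZeroUsize;

    fn take_advance_back(len: usize, take: usize, n: usize) -> Result<(), NonZeroUsize> {
        let trim_inner = len.saturating_sub(take);      // back elements Take hides
        let advance_by = trim_inner.saturating_add(n);
        let advanced_inner = advance_by.min(len);       // inner stops when empty
        let advanced = advanced_inner - trim_inner;     // back-steps visible to Take
        NonZeroUsize::new(n - advanced).map_or(Ok(()), Err)
    }

    fn main() {
        // take(3) of a 10-element source: trim 7, then 2 back-steps succeed.
        assert_eq!(take_advance_back(10, 3, 2), Ok(()));
        // Only 3 elements visible, so 5 back-steps fall short by 2.
        assert_eq!(take_advance_back(10, 3, 5), Err(NonZeroUsize::new(2).unwrap()));
    }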
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index ae00232c1..de638552f 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -278,7 +278,7 @@
//!
//! ```
//! # #![allow(unused_must_use)]
-//! # #![cfg_attr(not(bootstrap), allow(map_unit_fn))]
+//! # #![allow(map_unit_fn)]
//! let v = vec![1, 2, 3, 4, 5];
//! v.iter().map(|x| println!("{x}"));
//! ```
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
index 78e27d730..37db07429 100644
--- a/library/core/src/iter/range.rs
+++ b/library/core/src/iter/range.rs
@@ -1,5 +1,6 @@
use crate::convert::TryFrom;
use crate::mem;
+use crate::num::NonZeroUsize;
use crate::ops::{self, Try};
use super::{
@@ -520,12 +521,12 @@ trait RangeIteratorImpl {
// Iterator
fn spec_next(&mut self) -> Option<Self::Item>;
fn spec_nth(&mut self, n: usize) -> Option<Self::Item>;
- fn spec_advance_by(&mut self, n: usize) -> Result<(), usize>;
+ fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize>;
// DoubleEndedIterator
fn spec_next_back(&mut self) -> Option<Self::Item>;
fn spec_nth_back(&mut self, n: usize) -> Option<Self::Item>;
- fn spec_advance_back_by(&mut self, n: usize) -> Result<(), usize>;
+ fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize>;
}
impl<A: Step> RangeIteratorImpl for ops::Range<A> {
@@ -557,7 +558,7 @@ impl<A: Step> RangeIteratorImpl for ops::Range<A> {
}
#[inline]
- default fn spec_advance_by(&mut self, n: usize) -> Result<(), usize> {
+ default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let available = if self.start <= self.end {
Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
} else {
@@ -569,7 +570,7 @@ impl<A: Step> RangeIteratorImpl for ops::Range<A> {
self.start =
Step::forward_checked(self.start.clone(), taken).expect("`Step` invariants not upheld");
- if taken < n { Err(taken) } else { Ok(()) }
+ NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
}
#[inline]
@@ -598,7 +599,7 @@ impl<A: Step> RangeIteratorImpl for ops::Range<A> {
}
#[inline]
- default fn spec_advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ default fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let available = if self.start <= self.end {
Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
} else {
@@ -610,7 +611,7 @@ impl<A: Step> RangeIteratorImpl for ops::Range<A> {
self.end =
Step::backward_checked(self.end.clone(), taken).expect("`Step` invariants not upheld");
- if taken < n { Err(taken) } else { Ok(()) }
+ NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
}
}
@@ -641,7 +642,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
}
#[inline]
- fn spec_advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let available = if self.start <= self.end {
Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
} else {
@@ -656,7 +657,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
// Otherwise 0 is returned which always safe to use.
self.start = unsafe { Step::forward_unchecked(self.start.clone(), taken) };
- if taken < n { Err(taken) } else { Ok(()) }
+ NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
}
#[inline]
@@ -685,7 +686,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
}
#[inline]
- fn spec_advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let available = if self.start <= self.end {
Step::steps_between(&self.start, &self.end).unwrap_or(usize::MAX)
} else {
@@ -697,7 +698,7 @@ impl<T: TrustedStep> RangeIteratorImpl for ops::Range<T> {
// SAFETY: same as the spec_advance_by() implementation
self.end = unsafe { Step::backward_unchecked(self.end.clone(), taken) };
- if taken < n { Err(taken) } else { Ok(()) }
+ NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
}
}
@@ -746,7 +747,7 @@ impl<A: Step> Iterator for ops::Range<A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.spec_advance_by(n)
}
@@ -824,7 +825,7 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.spec_advance_back_by(n)
}
}
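
The specialization above reduces to simple clamp-and-shift arithmetic for primitive ranges; a minimal model over `Range<u32>` (a sketch, not the real `Step`-generic code):

    use std::num::NonZeroUsize;

    // Clamp the request to the available steps, move `start`, report shortfall.
    fn range_advance(range: &mut std::ops::Range<u32>, n: usize) -> Result<(), NonZeroUsize> {
        let available = range.end.saturating_sub(range.start) as usize;
        let taken = available.min(n);
        range.start += taken as u32;
        NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
    }

    fn main() {
        let mut r = 0u32..10;
        assert_eq!(range_advance(&mut r, 4), Ok(()));
        assert_eq!(r, 4..10);
        assert_eq!(range_advance(&mut r, 100), Err(NonZeroUsize::new(94).unwrap()));
        assert!(r.is_empty());
    }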
diff --git a/library/core/src/iter/sources/repeat.rs b/library/core/src/iter/sources/repeat.rs
index 733142ed0..67051f6e9 100644
--- a/library/core/src/iter/sources/repeat.rs
+++ b/library/core/src/iter/sources/repeat.rs
@@ -1,4 +1,5 @@
use crate::iter::{FusedIterator, TrustedLen};
+use crate::num::NonZeroUsize;
/// Creates a new iterator that endlessly repeats a single element.
///
@@ -80,7 +81,7 @@ impl<A: Clone> Iterator for Repeat<A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
// Advancing an infinite iterator of a single element is a no-op.
let _ = n;
Ok(())
@@ -109,7 +110,7 @@ impl<A: Clone> DoubleEndedIterator for Repeat<A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
// Advancing an infinite iterator of a single element is a no-op.
let _ = n;
Ok(())
diff --git a/library/core/src/iter/sources/repeat_n.rs b/library/core/src/iter/sources/repeat_n.rs
index dc61d6065..0b0445850 100644
--- a/library/core/src/iter/sources/repeat_n.rs
+++ b/library/core/src/iter/sources/repeat_n.rs
@@ -1,5 +1,6 @@
use crate::iter::{FusedIterator, TrustedLen};
use crate::mem::ManuallyDrop;
+use crate::num::NonZeroUsize;
/// Creates a new iterator that repeats a single element a given number of times.
///
@@ -137,7 +138,7 @@ impl<A: Clone> Iterator for RepeatN<A> {
}
#[inline]
- fn advance_by(&mut self, skip: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, skip: usize) -> Result<(), NonZeroUsize> {
let len = self.count;
if skip >= len {
@@ -145,7 +146,8 @@ impl<A: Clone> Iterator for RepeatN<A> {
}
if skip > len {
- Err(len)
+ // SAFETY: we just checked that the difference is positive
+ Err(unsafe { NonZeroUsize::new_unchecked(skip - len) })
} else {
self.count = len - skip;
Ok(())
@@ -178,7 +180,7 @@ impl<A: Clone> DoubleEndedIterator for RepeatN<A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
self.advance_by(n)
}
diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs
index 3f34105a3..d3cd74a44 100644
--- a/library/core/src/iter/sources/repeat_with.rs
+++ b/library/core/src/iter/sources/repeat_with.rs
@@ -19,7 +19,6 @@ use crate::ops::Try;
/// please open a GitHub issue explaining your use case.
///
/// [`repeat()`]: crate::iter::repeat
-/// [`DoubleEndedIterator`]: crate::iter::DoubleEndedIterator
///
/// # Examples
///
diff --git a/library/core/src/iter/traits/accum.rs b/library/core/src/iter/traits/accum.rs
index e31669b39..f9c7eb8f9 100644
--- a/library/core/src/iter/traits/accum.rs
+++ b/library/core/src/iter/traits/accum.rs
@@ -164,12 +164,13 @@ where
/// element is encountered:
///
/// ```
+ /// let f = |&x: &i32| if x < 0 { Err("Negative element found") } else { Ok(x) };
/// let v = vec![1, 2];
- /// let res: Result<i32, &'static str> = v.iter().map(|&x: &i32|
- /// if x < 0 { Err("Negative element found") }
- /// else { Ok(x) }
- /// ).sum();
+ /// let res: Result<i32, _> = v.iter().map(f).sum();
/// assert_eq!(res, Ok(3));
+ /// let v = vec![1, -2];
+ /// let res: Result<i32, _> = v.iter().map(f).sum();
+ /// assert_eq!(res, Err("Negative element found"));
/// ```
fn sum<I>(iter: I) -> Result<T, E>
where
@@ -187,6 +188,20 @@ where
/// Takes each element in the [`Iterator`]: if it is an [`Err`], no further
/// elements are taken, and the [`Err`] is returned. Should no [`Err`]
/// occur, the product of all elements is returned.
+ ///
+ /// # Examples
+ ///
+ /// This multiplies each number in a vector of strings;
+ /// if a string cannot be parsed, the operation returns `Err`:
+ ///
+ /// ```
+ /// let nums = vec!["5", "10", "1", "2"];
+ /// let total: Result<usize, _> = nums.iter().map(|w| w.parse::<usize>()).product();
+ /// assert_eq!(total, Ok(100));
+ /// let nums = vec!["5", "10", "one", "2"];
+ /// let total: Result<usize, _> = nums.iter().map(|w| w.parse::<usize>()).product();
+ /// assert!(total.is_err());
+ /// ```
fn product<I>(iter: I) -> Result<T, E>
where
I: Iterator<Item = Result<U, E>>,
@@ -213,6 +228,9 @@ where
/// let words = vec!["have", "a", "great", "day"];
/// let total: Option<usize> = words.iter().map(|w| w.find('a')).sum();
/// assert_eq!(total, Some(5));
+ /// let words = vec!["have", "a", "good", "day"];
+ /// let total: Option<usize> = words.iter().map(|w| w.find('a')).sum();
+ /// assert_eq!(total, None);
/// ```
fn sum<I>(iter: I) -> Option<T>
where
@@ -230,6 +248,20 @@ where
/// Takes each element in the [`Iterator`]: if it is a [`None`], no further
/// elements are taken, and the [`None`] is returned. Should no [`None`]
/// occur, the product of all elements is returned.
+ ///
+ /// # Examples
+ ///
+ /// This multiplies each number in a vector of strings;
+ /// if a string cannot be parsed, the operation returns `None`:
+ ///
+ /// ```
+ /// let nums = vec!["5", "10", "1", "2"];
+ /// let total: Option<usize> = nums.iter().map(|w| w.parse::<usize>().ok()).product();
+ /// assert_eq!(total, Some(100));
+ /// let nums = vec!["5", "10", "one", "2"];
+ /// let total: Option<usize> = nums.iter().map(|w| w.parse::<usize>().ok()).product();
+ /// assert_eq!(total, None);
+ /// ```
fn product<I>(iter: I) -> Option<T>
where
I: Iterator<Item = Option<U>>,
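
A minimal model of the short-circuiting behavior these docs describe, written as an explicit loop; `sum_results` is illustrative only, the real `Sum for Result` impl is driven by an internal adapter:

    fn sum_results<I>(iter: I) -> Result<i32, String>
    where
        I: IntoIterator<Item = Result<i32, String>>,
    {
        let mut acc = 0;
        for item in iter {
            acc += item?; // the first Err is returned; later items are never taken
        }
        Ok(acc)
    }

    fn main() {
        let ok = vec![Ok(1), Ok(2), Ok(3)];
        assert_eq!(sum_results(ok), Ok(6));
        let bad = vec![Ok(1), Err("boom".to_string()), Ok(3)];
        assert_eq!(sum_results(bad), Err("boom".to_string()));
    }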
diff --git a/library/core/src/iter/traits/double_ended.rs b/library/core/src/iter/traits/double_ended.rs
index ed23873cd..182d9f758 100644
--- a/library/core/src/iter/traits/double_ended.rs
+++ b/library/core/src/iter/traits/double_ended.rs
@@ -1,3 +1,4 @@
+use crate::num::NonZeroUsize;
use crate::ops::{ControlFlow, Try};
/// An iterator able to yield elements from both ends.
@@ -98,10 +99,11 @@ pub trait DoubleEndedIterator: Iterator {
/// eagerly skip `n` elements starting from the back by calling [`next_back`] up
/// to `n` times until [`None`] is encountered.
///
- /// `advance_back_by(n)` will return [`Ok(())`] if the iterator successfully advances by
- /// `n` elements, or [`Err(k)`] if [`None`] is encountered, where `k` is the number of
- /// elements the iterator is advanced by before running out of elements (i.e. the length
- /// of the iterator). Note that `k` is always less than `n`.
+ /// `advance_back_by(n)` will return `Ok(())` if the iterator successfully advances by
+ /// `n` elements, or an `Err(NonZeroUsize)` with value `k` if [`None`] is encountered, where `k`
+ /// is the remaining number of steps that could not be advanced because the iterator ran out.
+ /// If `self` is empty and `n` is non-zero, then this returns `Err(n)`.
+ /// Otherwise, `k` is always less than `n`.
///
/// Calling `advance_back_by(0)` can do meaningful work, for example [`Flatten`] can advance its
/// outer iterator until it finds an inner iterator that is not empty, which then often
@@ -118,22 +120,26 @@ pub trait DoubleEndedIterator: Iterator {
/// ```
/// #![feature(iter_advance_by)]
///
+ /// use std::num::NonZeroUsize;
/// let a = [3, 4, 5, 6];
/// let mut iter = a.iter();
///
/// assert_eq!(iter.advance_back_by(2), Ok(()));
/// assert_eq!(iter.next_back(), Some(&4));
/// assert_eq!(iter.advance_back_by(0), Ok(()));
- /// assert_eq!(iter.advance_back_by(100), Err(1)); // only `&3` was skipped
+ /// assert_eq!(iter.advance_back_by(100), Err(NonZeroUsize::new(99).unwrap())); // only `&3` was skipped
/// ```
///
/// [`Ok(())`]: Ok
/// [`Err(k)`]: Err
#[inline]
#[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
for i in 0..n {
- self.next_back().ok_or(i)?;
+ if self.next_back().is_none() {
+ // SAFETY: `i` is always less than `n`.
+ return Err(unsafe { NonZeroUsize::new_unchecked(n - i) });
+ }
}
Ok(())
}
@@ -182,7 +188,9 @@ pub trait DoubleEndedIterator: Iterator {
#[inline]
#[stable(feature = "iter_nth_back", since = "1.37.0")]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
- self.advance_back_by(n).ok()?;
+ if self.advance_back_by(n).is_err() {
+ return None;
+ }
self.next_back()
}
@@ -365,7 +373,7 @@ impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I {
fn next_back(&mut self) -> Option<I::Item> {
(**self).next_back()
}
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
(**self).advance_back_by(n)
}
fn nth_back(&mut self, n: usize) -> Option<I::Item> {
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index b8e7d0a68..028776042 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -1,5 +1,6 @@
use crate::array;
use crate::cmp::{self, Ordering};
+use crate::num::NonZeroUsize;
use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
use super::super::try_process;
@@ -69,7 +70,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
#[doc(notable_trait)]
#[rustc_diagnostic_item = "Iterator"]
#[must_use = "iterators are lazy and do nothing unless consumed"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Iterator {
/// The type of the elements being iterated over.
#[rustc_diagnostic_item = "IteratorItem"]
@@ -307,10 +308,11 @@ pub trait Iterator {
/// This method will eagerly skip `n` elements by calling [`next`] up to `n`
/// times until [`None`] is encountered.
///
- /// `advance_by(n)` will return [`Ok(())`][Ok] if the iterator successfully advances by
- /// `n` elements, or [`Err(k)`][Err] if [`None`] is encountered, where `k` is the number
- /// of elements the iterator is advanced by before running out of elements (i.e. the
- /// length of the iterator). Note that `k` is always less than `n`.
+ /// `advance_by(n)` will return `Ok(())` if the iterator successfully advances by
+ /// `n` elements, or an `Err(NonZeroUsize)` with value `k` if [`None`] is encountered,
+ /// where `k` is the remaining number of steps that could not be advanced because the iterator ran out.
+ /// If `self` is empty and `n` is non-zero, then this returns `Err(n)`.
+ /// Otherwise, `k` is always less than `n`.
///
/// Calling `advance_by(0)` can do meaningful work, for example [`Flatten`]
/// can advance its outer iterator until it finds an inner iterator that is not empty, which
@@ -326,20 +328,24 @@ pub trait Iterator {
/// ```
/// #![feature(iter_advance_by)]
///
+ /// use std::num::NonZeroUsize;
/// let a = [1, 2, 3, 4];
/// let mut iter = a.iter();
///
/// assert_eq!(iter.advance_by(2), Ok(()));
/// assert_eq!(iter.next(), Some(&3));
/// assert_eq!(iter.advance_by(0), Ok(()));
- /// assert_eq!(iter.advance_by(100), Err(1)); // only `&4` was skipped
+ /// assert_eq!(iter.advance_by(100), Err(NonZeroUsize::new(99).unwrap())); // only `&4` was skipped
/// ```
#[inline]
#[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")]
#[rustc_do_not_const_check]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
for i in 0..n {
- self.next().ok_or(i)?;
+ if self.next().is_none() {
+ // SAFETY: `i` is always less than `n`.
+ return Err(unsafe { NonZeroUsize::new_unchecked(n - i) });
+ }
}
Ok(())
}
@@ -758,7 +764,6 @@ pub trait Iterator {
/// more idiomatic to use [`for`] than `map()`.
///
/// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
- /// [`FnMut`]: crate::ops::FnMut
///
/// # Examples
///
@@ -1998,7 +2003,7 @@ pub trait Iterator {
/// a.iter().map(|&x| x * 2).collect_into(&mut vec);
/// a.iter().map(|&x| x * 10).collect_into(&mut vec);
///
- /// assert_eq!(vec![0, 1, 2, 4, 6, 10, 20, 30], vec);
+ /// assert_eq!(vec, vec![0, 1, 2, 4, 6, 10, 20, 30]);
/// ```
///
/// `Vec` can have a manual set capacity to avoid reallocating it:
@@ -2013,7 +2018,7 @@ pub trait Iterator {
/// a.iter().map(|&x| x * 10).collect_into(&mut vec);
///
/// assert_eq!(6, vec.capacity());
- /// println!("{:?}", vec);
+ /// assert_eq!(vec, vec![2, 4, 6, 10, 20, 30]);
/// ```
///
/// The returned mutable reference can be used to continue the call chain:
@@ -2027,12 +2032,12 @@ pub trait Iterator {
/// let count = a.iter().collect_into(&mut vec).iter().count();
///
/// assert_eq!(count, vec.len());
- /// println!("Vec len is {}", count);
+ /// assert_eq!(vec, vec![1, 2, 3]);
///
/// let count = a.iter().collect_into(&mut vec).iter().count();
///
/// assert_eq!(count, vec.len());
- /// println!("Vec len now is {}", count);
+ /// assert_eq!(vec, vec![1, 2, 3, 1, 2, 3]);
/// ```
#[inline]
#[unstable(feature = "iter_collect_into", reason = "new API", issue = "94780")]
@@ -2109,8 +2114,8 @@ pub trait Iterator {
///
/// # Current implementation
///
- /// Current algorithms tries finding the first element for which the predicate evaluates
- /// to false, and the last element for which it evaluates to true and repeatedly swaps them.
+ /// The current algorithm tries to find the first element for which the predicate evaluates
+ /// to false and the last element for which it evaluates to true, and repeatedly swaps them.
///
/// Time complexity: *O*(*n*)
///
@@ -3443,6 +3448,9 @@ pub trait Iterator {
///
/// An empty iterator returns the zero value of the type.
///
+ /// `sum()` can be used to sum any type implementing [`Sum`][`core::iter::Sum`],
+ /// including [`Option`][`Option::sum`] and [`Result`][`Result::sum`].
+ ///
/// # Panics
///
/// When calling `sum()` and a primitive integer type is being returned, this
@@ -3473,6 +3481,9 @@ pub trait Iterator {
///
/// An empty iterator returns the one value of the type.
///
+ /// `product()` can be used to multiply any type implementing [`Product`][`core::iter::Product`],
+ /// including [`Option`][`Option::product`] and [`Result`][`Result::product`].
+ ///
/// # Panics
///
/// When calling `product()` and a primitive integer type is being returned,
@@ -3721,7 +3732,7 @@ pub trait Iterator {
}
}
- /// Determines if the elements of this [`Iterator`] are unequal to those of
+ /// Determines if the elements of this [`Iterator`] are not equal to those of
/// another.
///
/// # Examples
@@ -4002,7 +4013,7 @@ impl<I: Iterator + ?Sized> Iterator for &mut I {
fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
(**self).advance_by(n)
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
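
Under the new contract, an override can often replace the default `next()` loop with O(1) arithmetic. A sketch of a custom iterator doing so (hypothetical `Count` type; nightly-only while `iter_advance_by` is unstable):

    #![feature(iter_advance_by)]
    use std::num::NonZeroUsize;

    struct Count {
        remaining: usize,
    }

    impl Iterator for Count {
        type Item = usize;
        fn next(&mut self) -> Option<usize> {
            self.remaining = self.remaining.checked_sub(1)?;
            Some(self.remaining)
        }
        // O(1) override using the NonZeroUsize error convention.
        fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
            let taken = n.min(self.remaining);
            self.remaining -= taken;
            NonZeroUsize::new(n - taken).map_or(Ok(()), Err)
        }
    }

    fn main() {
        let mut c = Count { remaining: 10 };
        assert_eq!(c.advance_by(4), Ok(()));
        assert_eq!(c.next(), Some(5));
        assert_eq!(c.advance_by(100), Err(NonZeroUsize::new(95).unwrap()));
    }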
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 24bad799f..04243544b 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -95,14 +95,17 @@
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![allow(incomplete_features)]
-#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))]
+#![warn(multiple_supertrait_upcastable)]
//
// Library features:
-#![feature(const_align_offset)]
+// tidy-alphabetical-start
+#![feature(char_indices_offset)]
#![feature(const_align_of_val)]
#![feature(const_align_of_val_raw)]
+#![feature(const_align_offset)]
#![feature(const_alloc_layout)]
#![feature(const_arguments_as_str)]
+#![feature(const_array_from_ref)]
#![feature(const_array_into_iter_constructors)]
#![feature(const_bigint_helper_methods)]
#![feature(const_black_box)]
@@ -111,6 +114,9 @@
#![feature(const_char_from_u32_unchecked)]
#![feature(const_clone)]
#![feature(const_cmp)]
+#![feature(const_convert)]
+#![feature(const_cstr_methods)]
+#![feature(const_default_impls)]
#![feature(const_discriminant)]
#![feature(const_eval_select)]
#![feature(const_exact_div)]
@@ -119,17 +125,17 @@
#![feature(const_fmt_arguments_new)]
#![feature(const_hash)]
#![feature(const_heap)]
-#![feature(const_convert)]
#![feature(const_index_range_slice_index)]
#![feature(const_inherent_unchecked_arith)]
#![feature(const_int_unchecked_arith)]
#![feature(const_intrinsic_forget)]
#![feature(const_ipv4)]
#![feature(const_ipv6)]
+#![feature(const_is_char_boundary)]
#![feature(const_likely)]
-#![feature(const_maybe_uninit_uninit_array)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_assume_init)]
+#![feature(const_maybe_uninit_uninit_array)]
#![feature(const_nonnull_new)]
#![feature(const_num_from_num)]
#![feature(const_ops)]
@@ -138,32 +144,35 @@
#![feature(const_pin)]
#![feature(const_pointer_byte_offsets)]
#![feature(const_pointer_is_aligned)]
-#![feature(const_ptr_sub_ptr)]
-#![feature(const_replace)]
-#![feature(const_result_drop)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_is_null)]
#![feature(const_ptr_read)]
+#![feature(const_ptr_sub_ptr)]
#![feature(const_ptr_write)]
#![feature(const_raw_ptr_comparison)]
+#![feature(const_replace)]
+#![feature(const_result_drop)]
#![feature(const_size_of_val)]
#![feature(const_size_of_val_raw)]
#![feature(const_slice_from_raw_parts_mut)]
+#![feature(const_slice_from_ref)]
+#![feature(const_slice_index)]
#![feature(const_slice_ptr_len)]
#![feature(const_slice_split_at_mut)]
#![feature(const_str_from_utf8_unchecked_mut)]
#![feature(const_swap)]
#![feature(const_trait_impl)]
+#![feature(const_transmute_copy)]
#![feature(const_try)]
#![feature(const_type_id)]
#![feature(const_type_name)]
-#![feature(const_default_impls)]
#![feature(const_unicode_case_lookup)]
#![feature(const_unsafecell_get_mut)]
#![feature(const_waker)]
#![feature(core_panic)]
-#![feature(char_indices_offset)]
#![feature(duration_consts_float)]
+#![feature(ip)]
+#![feature(is_ascii_octdigit)]
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
@@ -171,25 +180,21 @@
#![feature(slice_ptr_get)]
#![feature(slice_split_at_unchecked)]
#![feature(str_internals)]
-#![feature(str_split_remainder)]
#![feature(str_split_inclusive_remainder)]
+#![feature(str_split_remainder)]
#![feature(strict_provenance)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
#![feature(variant_count)]
-#![feature(const_array_from_ref)]
-#![feature(const_slice_from_ref)]
-#![feature(const_slice_index)]
-#![feature(const_is_char_boundary)]
-#![feature(const_cstr_methods)]
-#![feature(ip)]
-#![feature(is_ascii_octdigit)]
+// tidy-alphabetical-end
//
// Language features:
+// tidy-alphabetical-start
#![feature(abi_unadjusted)]
#![feature(adt_const_params)]
#![feature(allow_internal_unsafe)]
#![feature(allow_internal_unstable)]
+#![feature(asm_const)]
#![feature(associated_type_bounds)]
#![feature(auto_traits)]
#![feature(c_unwind)]
@@ -206,12 +211,12 @@
#![feature(deprecated_suggestion)]
#![feature(derive_const)]
#![feature(doc_cfg)]
+#![feature(doc_cfg_hide)]
#![feature(doc_notable_trait)]
-#![feature(rustdoc_internals)]
#![feature(exhaustive_patterns)]
-#![feature(doc_cfg_hide)]
#![feature(extern_types)]
#![feature(fundamental)]
+#![feature(generic_arg_infer)]
#![feature(if_let_guard)]
#![feature(inline_const)]
#![feature(intra_doc_pointers)]
@@ -220,6 +225,7 @@
#![feature(link_llvm_intrinsics)]
#![feature(macro_metavar_expr)]
#![feature(min_specialization)]
+#![feature(multiple_supertrait_upcastable)]
#![feature(must_not_suspend)]
#![feature(negative_impls)]
#![feature(never_type)]
@@ -230,6 +236,7 @@
#![feature(repr_simd)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
+#![feature(rustdoc_internals)]
#![feature(simd_ffi)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
@@ -239,11 +246,10 @@
#![feature(try_blocks)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
-#![feature(asm_const)]
-#![feature(const_transmute_copy)]
-#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))]
+// tidy-alphabetical-end
//
// Target features:
+// tidy-alphabetical-start
#![feature(arm_target_feature)]
#![feature(avx512_target_feature)]
#![feature(hexagon_target_feature)]
@@ -254,7 +260,7 @@
#![feature(sse4a_target_feature)]
#![feature(tbm_target_feature)]
#![feature(wasm_target_feature)]
-#![cfg_attr(bootstrap, feature(cmpxchg16b_target_feature))]
+// tidy-alphabetical-end
// allow using `core::` in intra-doc links
#[allow(unused_extern_crates)]
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index 3b026bc0e..7c93c93b4 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -340,9 +340,9 @@ pub macro debug_assert_matches($($arg:tt)*) {
#[stable(feature = "matches_macro", since = "1.42.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "matches_macro")]
macro_rules! matches {
- ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => {
+ ($expression:expr, $pattern:pat $(if $guard:expr)? $(,)?) => {
match $expression {
- $( $pattern )|+ $( if $guard )? => true,
+ $pattern $(if $guard)? => true,
_ => false
}
};
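
Since `$pattern:pat` matches top-level or-patterns on edition 2021 and later, the simplified matcher loses no expressiveness; for example:

    fn main() {
        let c = 'f';
        // Or-patterns and a trailing guard still work with the single
        // `$pattern:pat` fragment.
        assert!(matches!(c, 'a' | 'e' | 'i' | 'o' | 'u' | 'f'));
        assert!(matches!(c, x if x.is_ascii_lowercase()));
    }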
@@ -712,8 +712,8 @@ macro_rules! unimplemented {
/// Indicates unfinished code.
///
-/// This can be useful if you are prototyping and are just looking to have your
-/// code typecheck.
+/// This can be useful if you are prototyping and just
+/// want a placeholder to let your code pass type analysis.
///
/// The difference between [`unimplemented!`] and `todo!` is that while `todo!` conveys
/// an intent of implementing the functionality later and the message is "not yet
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 520ae0edb..3cd4f5104 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -97,7 +97,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
#[rustc_deny_explicit_impl]
-#[cfg_attr(not(bootstrap), rustc_coinductive)]
+#[rustc_coinductive]
pub trait Sized {
// Empty.
}
@@ -324,7 +324,7 @@ pub trait StructuralEq {
/// attempt to derive a `Copy` implementation, we'll get an error:
///
/// ```text
-/// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy`
+/// the trait `Copy` cannot be implemented for this type; field `points` does not implement `Copy`
/// ```
///
/// Shared references (`&T`) are also `Copy`, so a type can be `Copy`, even when it holds
@@ -823,7 +823,7 @@ unsafe impl<T: ?Sized> Freeze for &mut T {}
/// [`pin` module]: crate::pin
#[stable(feature = "pin", since = "1.33.0")]
#[rustc_on_unimplemented(
- note = "consider using `Box::pin`",
+ note = "consider using the `pin!` macro\nconsider using `Box::pin` if you need to access the pinned value outside of the current scope",
message = "`{Self}` cannot be unpinned"
)]
#[lang = "unpin"]
@@ -877,10 +877,9 @@ pub trait Tuple {}
/// All types that have the same size and alignment as a `usize` or
/// `*const ()` automatically implement this trait.
#[unstable(feature = "pointer_like_trait", issue = "none")]
-#[cfg_attr(bootstrap, lang = "pointer_sized")]
-#[cfg_attr(not(bootstrap), lang = "pointer_like")]
+#[lang = "pointer_like"]
#[rustc_on_unimplemented(
- message = "`{Self}` needs to have the same alignment and size as a pointer",
+ message = "`{Self}` needs to have the same ABI as a pointer",
label = "`{Self}` needs to be a pointer-like type"
)]
pub trait PointerLike {}
@@ -923,3 +922,18 @@ mod copy_impls {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Copy for &T {}
}
+
+/// A common trait implemented by all function pointers.
+#[unstable(
+ feature = "fn_ptr_trait",
+ issue = "none",
+ reason = "internal trait for implementing various traits for all function pointers"
+)]
+#[lang = "fn_ptr_trait"]
+#[cfg(not(bootstrap))]
+#[rustc_deny_explicit_impl]
+pub trait FnPtr: Copy + Clone {
+ /// Returns the address of the function pointer.
+ #[lang = "fn_ptr_addr"]
+ fn addr(self) -> *const ();
+}
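
On stable Rust, the address component that `FnPtr::addr` exposes corresponds to a plain pointer cast; a small illustration (the `double` function is arbitrary):

    fn double(x: u32) -> u32 { x * 2 }

    fn main() {
        let f: fn(u32) -> u32 = double;
        // The address part of a function pointer, as a unit pointer.
        let addr: *const () = f as *const ();
        assert!(!addr.is_null());
    }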
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index 3f4918365..9c6d48675 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -1241,13 +1241,9 @@ impl<T> MaybeUninit<T> {
/// ```
#[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")]
pub fn slice_as_bytes(this: &[MaybeUninit<T>]) -> &[MaybeUninit<u8>] {
+ let bytes = mem::size_of_val(this);
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
- unsafe {
- slice::from_raw_parts(
- this.as_ptr() as *const MaybeUninit<u8>,
- this.len() * mem::size_of::<T>(),
- )
- }
+ unsafe { slice::from_raw_parts(this.as_ptr() as *const MaybeUninit<u8>, bytes) }
}
/// Returns the contents of this mutable slice of `MaybeUninit` as a mutable slice of
@@ -1274,13 +1270,9 @@ impl<T> MaybeUninit<T> {
/// ```
#[unstable(feature = "maybe_uninit_as_bytes", issue = "93092")]
pub fn slice_as_bytes_mut(this: &mut [MaybeUninit<T>]) -> &mut [MaybeUninit<u8>] {
+ let bytes = mem::size_of_val(this);
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
- unsafe {
- slice::from_raw_parts_mut(
- this.as_mut_ptr() as *mut MaybeUninit<u8>,
- this.len() * mem::size_of::<T>(),
- )
- }
+ unsafe { slice::from_raw_parts_mut(this.as_mut_ptr() as *mut MaybeUninit<u8>, bytes) }
}
}
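
Both refactors above rely on the identity `size_of_val(slice) == slice.len() * size_of::<T>()` for slices, so behavior is unchanged; a quick check:

    use std::mem;

    fn main() {
        let xs = [0u32; 5];
        // For a slice, size_of_val is exactly the element count times
        // the element size, which is what the old code computed by hand.
        assert_eq!(mem::size_of_val(&xs[..]), xs.len() * mem::size_of::<u32>());
    }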
diff --git a/library/core/src/mem/transmutability.rs b/library/core/src/mem/transmutability.rs
index 3b98efff2..b53a330fa 100644
--- a/library/core/src/mem/transmutability.rs
+++ b/library/core/src/mem/transmutability.rs
@@ -5,10 +5,6 @@
/// notwithstanding whatever safety checks you have asked the compiler to [`Assume`] are satisfied.
#[unstable(feature = "transmutability", issue = "99571")]
#[lang = "transmute_trait"]
-#[rustc_on_unimplemented(
- message = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`.",
- label = "`{Src}` cannot be safely transmuted into `{Self}` in the defining scope of `{Context}`."
-)]
pub unsafe trait BikeshedIntrinsicFrom<Src, Context, const ASSUME: Assume = { Assume::NOTHING }>
where
Src: ?Sized,
diff --git a/library/core/src/net/parser.rs b/library/core/src/net/parser.rs
index a08d2792d..b9a1924d6 100644
--- a/library/core/src/net/parser.rs
+++ b/library/core/src/net/parser.rs
@@ -9,7 +9,7 @@ use crate::fmt;
use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use crate::str::FromStr;
-trait ReadNumberHelper: crate::marker::Sized {
+trait ReadNumberHelper: Sized {
const ZERO: Self;
fn checked_mul(&self, other: u32) -> Option<Self>;
fn checked_add(&self, other: u32) -> Option<Self>;
diff --git a/library/core/src/num/dec2flt/common.rs b/library/core/src/num/dec2flt/common.rs
index 17957d7e7..11a626485 100644
--- a/library/core/src/num/dec2flt/common.rs
+++ b/library/core/src/num/dec2flt/common.rs
@@ -1,165 +1,60 @@
//! Common utilities, for internal use only.
-use crate::ptr;
-
/// Helper methods to process immutable bytes.
-pub(crate) trait ByteSlice: AsRef<[u8]> {
- unsafe fn first_unchecked(&self) -> u8 {
- debug_assert!(!self.is_empty());
- // SAFETY: safe as long as self is not empty
- unsafe { *self.as_ref().get_unchecked(0) }
- }
-
- /// Get if the slice contains no elements.
- fn is_empty(&self) -> bool {
- self.as_ref().is_empty()
- }
-
- /// Check if the slice at least `n` length.
- fn check_len(&self, n: usize) -> bool {
- n <= self.as_ref().len()
- }
-
- /// Check if the first character in the slice is equal to c.
- fn first_is(&self, c: u8) -> bool {
- self.as_ref().first() == Some(&c)
- }
-
- /// Check if the first character in the slice is equal to c1 or c2.
- fn first_is2(&self, c1: u8, c2: u8) -> bool {
- if let Some(&c) = self.as_ref().first() { c == c1 || c == c2 } else { false }
- }
-
- /// Bounds-checked test if the first character in the slice is a digit.
- fn first_isdigit(&self) -> bool {
- if let Some(&c) = self.as_ref().first() { c.is_ascii_digit() } else { false }
- }
-
- /// Check if self starts with u with a case-insensitive comparison.
- fn starts_with_ignore_case(&self, u: &[u8]) -> bool {
- debug_assert!(self.as_ref().len() >= u.len());
- let iter = self.as_ref().iter().zip(u.iter());
- let d = iter.fold(0, |i, (&x, &y)| i | (x ^ y));
- d == 0 || d == 32
- }
-
- /// Get the remaining slice after the first N elements.
- fn advance(&self, n: usize) -> &[u8] {
- &self.as_ref()[n..]
- }
-
- /// Get the slice after skipping all leading characters equal c.
- fn skip_chars(&self, c: u8) -> &[u8] {
- let mut s = self.as_ref();
- while s.first_is(c) {
- s = s.advance(1);
- }
- s
- }
-
- /// Get the slice after skipping all leading characters equal c1 or c2.
- fn skip_chars2(&self, c1: u8, c2: u8) -> &[u8] {
- let mut s = self.as_ref();
- while s.first_is2(c1, c2) {
- s = s.advance(1);
- }
- s
- }
-
+pub(crate) trait ByteSlice {
/// Read 8 bytes as a 64-bit integer in little-endian order.
- unsafe fn read_u64_unchecked(&self) -> u64 {
- debug_assert!(self.check_len(8));
- let src = self.as_ref().as_ptr() as *const u64;
- // SAFETY: safe as long as self is at least 8 bytes
- u64::from_le(unsafe { ptr::read_unaligned(src) })
- }
+ fn read_u64(&self) -> u64;
- /// Try to read the next 8 bytes from the slice.
- fn read_u64(&self) -> Option<u64> {
- if self.check_len(8) {
- // SAFETY: self must be at least 8 bytes.
- Some(unsafe { self.read_u64_unchecked() })
- } else {
- None
- }
- }
-
- /// Calculate the offset of slice from another.
- fn offset_from(&self, other: &Self) -> isize {
- other.as_ref().len() as isize - self.as_ref().len() as isize
- }
-}
-
-impl ByteSlice for [u8] {}
-
-/// Helper methods to process mutable bytes.
-pub(crate) trait ByteSliceMut: AsMut<[u8]> {
/// Write a 64-bit integer as 8 bytes in little-endian order.
- unsafe fn write_u64_unchecked(&mut self, value: u64) {
- debug_assert!(self.as_mut().len() >= 8);
- let dst = self.as_mut().as_mut_ptr() as *mut u64;
- // NOTE: we must use `write_unaligned`, since dst is not
- // guaranteed to be properly aligned. Miri will warn us
- // if we use `write` instead of `write_unaligned`, as expected.
- // SAFETY: safe as long as self is at least 8 bytes
- unsafe {
- ptr::write_unaligned(dst, u64::to_le(value));
- }
- }
-}
+ fn write_u64(&mut self, value: u64);
-impl ByteSliceMut for [u8] {}
+ /// Calculate the offset of a slice from another.
+ fn offset_from(&self, other: &Self) -> isize;
-/// Bytes wrapper with specialized methods for ASCII characters.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub(crate) struct AsciiStr<'a> {
- slc: &'a [u8],
+ /// Iteratively parse and consume digits from bytes.
+ /// Returns the remaining bytes, with the consumed
+ /// digits removed.
+ fn parse_digits(&self, func: impl FnMut(u8)) -> &Self;
}
-impl<'a> AsciiStr<'a> {
- pub fn new(slc: &'a [u8]) -> Self {
- Self { slc }
+impl ByteSlice for [u8] {
+ #[inline(always)] // inlining this is crucial to remove bounds checks
+ fn read_u64(&self) -> u64 {
+ let mut tmp = [0; 8];
+ tmp.copy_from_slice(&self[..8]);
+ u64::from_le_bytes(tmp)
}
- /// Advance the view by n, advancing it in-place to (n..).
- pub unsafe fn step_by(&mut self, n: usize) -> &mut Self {
- // SAFETY: safe as long n is less than the buffer length
- self.slc = unsafe { self.slc.get_unchecked(n..) };
- self
+ #[inline(always)] // inlining this is crucial to remove bounds checks
+ fn write_u64(&mut self, value: u64) {
+ self[..8].copy_from_slice(&value.to_le_bytes())
}
- /// Advance the view by n, advancing it in-place to (1..).
- pub unsafe fn step(&mut self) -> &mut Self {
- // SAFETY: safe as long as self is not empty
- unsafe { self.step_by(1) }
+ #[inline]
+ fn offset_from(&self, other: &Self) -> isize {
+ other.len() as isize - self.len() as isize
}
- /// Iteratively parse and consume digits from bytes.
- pub fn parse_digits(&mut self, mut func: impl FnMut(u8)) {
- while let Some(&c) = self.as_ref().first() {
+ #[inline]
+ fn parse_digits(&self, mut func: impl FnMut(u8)) -> &Self {
+ let mut s = self;
+
+ // FIXME: Can't use s.split_first() here yet,
+ // see https://github.com/rust-lang/rust/issues/109328
+ while let [c, s_next @ ..] = s {
let c = c.wrapping_sub(b'0');
if c < 10 {
func(c);
- // SAFETY: self cannot be empty
- unsafe {
- self.step();
- }
+ s = s_next;
} else {
break;
}
}
- }
-}
-impl<'a> AsRef<[u8]> for AsciiStr<'a> {
- #[inline]
- fn as_ref(&self) -> &[u8] {
- self.slc
+ s
}
}
-impl<'a> ByteSlice for AsciiStr<'a> {}
-
/// Determine if 8 bytes are all decimal digits.
/// This does not care about the order in which the bytes were loaded.
pub(crate) fn is_8digits(v: u64) -> bool {
@@ -168,19 +63,6 @@ pub(crate) fn is_8digits(v: u64) -> bool {
(a | b) & 0x8080_8080_8080_8080 == 0
}
-/// Iteratively parse and consume digits from bytes.
-pub(crate) fn parse_digits(s: &mut &[u8], mut f: impl FnMut(u8)) {
- while let Some(&c) = s.get(0) {
- let c = c.wrapping_sub(b'0');
- if c < 10 {
- f(c);
- *s = s.advance(1);
- } else {
- break;
- }
- }
-}
-
/// A custom 64-bit floating point type, representing `f * 2^e`.
/// e is biased, so it can be directly shifted into the exponent bits.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
@@ -192,6 +74,7 @@ pub struct BiasedFp {
}
impl BiasedFp {
+ #[inline]
pub const fn zero_pow2(e: i32) -> Self {
Self { f: 0, e }
}
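
For reference, the full `is_8digits` SWAR check that the parsing code relies on: adding 0x46 to a byte sets its high bit exactly when the byte exceeds b'9' (0x39), and subtracting 0x30 sets it when the byte is below b'0'; cross-byte carries can only occur for bytes that are already flagged. A standalone copy with a check:

    fn is_8digits(v: u64) -> bool {
        // High bit of `a` set per byte above b'9'; of `b` per byte below b'0'.
        let a = v.wrapping_add(0x4646_4646_4646_4646);
        let b = v.wrapping_sub(0x3030_3030_3030_3030);
        (a | b) & 0x8080_8080_8080_8080 == 0
    }

    fn main() {
        assert!(is_8digits(u64::from_le_bytes(*b"12345678")));
        assert!(!is_8digits(u64::from_le_bytes(*b"1234a678")));
    }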
diff --git a/library/core/src/num/dec2flt/decimal.rs b/library/core/src/num/dec2flt/decimal.rs
index 2019f71e6..350f64bb4 100644
--- a/library/core/src/num/dec2flt/decimal.rs
+++ b/library/core/src/num/dec2flt/decimal.rs
@@ -9,7 +9,7 @@
//! algorithm can be found in "ParseNumberF64 by Simple Decimal Conversion",
//! available online: <https://nigeltao.github.io/blog/2020/parse-number-f64-simple.html>.
-use crate::num::dec2flt::common::{is_8digits, parse_digits, ByteSlice, ByteSliceMut};
+use crate::num::dec2flt::common::{is_8digits, ByteSlice};
#[derive(Clone)]
pub struct Decimal {
@@ -205,29 +205,32 @@ impl Decimal {
pub fn parse_decimal(mut s: &[u8]) -> Decimal {
let mut d = Decimal::default();
let start = s;
- s = s.skip_chars(b'0');
- parse_digits(&mut s, |digit| d.try_add_digit(digit));
- if s.first_is(b'.') {
- s = s.advance(1);
+
+ while let Some((&b'0', s_next)) = s.split_first() {
+ s = s_next;
+ }
+
+ s = s.parse_digits(|digit| d.try_add_digit(digit));
+
+ if let Some((b'.', s_next)) = s.split_first() {
+ s = s_next;
let first = s;
// Skip leading zeros.
if d.num_digits == 0 {
- s = s.skip_chars(b'0');
+ while let Some((&b'0', s_next)) = s.split_first() {
+ s = s_next;
+ }
}
while s.len() >= 8 && d.num_digits + 8 < Decimal::MAX_DIGITS {
- // SAFETY: s is at least 8 bytes.
- let v = unsafe { s.read_u64_unchecked() };
+ let v = s.read_u64();
if !is_8digits(v) {
break;
}
- // SAFETY: d.num_digits + 8 is less than d.digits.len()
- unsafe {
- d.digits[d.num_digits..].write_u64_unchecked(v - 0x3030_3030_3030_3030);
- }
+ d.digits[d.num_digits..].write_u64(v - 0x3030_3030_3030_3030);
d.num_digits += 8;
- s = s.advance(8);
+ s = &s[8..];
}
- parse_digits(&mut s, |digit| d.try_add_digit(digit));
+ s = s.parse_digits(|digit| d.try_add_digit(digit));
d.decimal_point = s.len() as i32 - first.len() as i32;
}
if d.num_digits != 0 {
@@ -248,22 +251,26 @@ pub fn parse_decimal(mut s: &[u8]) -> Decimal {
d.num_digits = Decimal::MAX_DIGITS;
}
}
- if s.first_is2(b'e', b'E') {
- s = s.advance(1);
- let mut neg_exp = false;
- if s.first_is(b'-') {
- neg_exp = true;
- s = s.advance(1);
- } else if s.first_is(b'+') {
- s = s.advance(1);
- }
- let mut exp_num = 0_i32;
- parse_digits(&mut s, |digit| {
- if exp_num < 0x10000 {
- exp_num = 10 * exp_num + digit as i32;
+ if let Some((&ch, s_next)) = s.split_first() {
+ if ch == b'e' || ch == b'E' {
+ s = s_next;
+ let mut neg_exp = false;
+ if let Some((&ch, s_next)) = s.split_first() {
+ neg_exp = ch == b'-';
+ if ch == b'-' || ch == b'+' {
+ s = s_next;
+ }
}
- });
- d.decimal_point += if neg_exp { -exp_num } else { exp_num };
+ let mut exp_num = 0_i32;
+
+ s.parse_digits(|digit| {
+ if exp_num < 0x10000 {
+ exp_num = 10 * exp_num + digit as i32;
+ }
+ });
+
+ d.decimal_point += if neg_exp { -exp_num } else { exp_num };
+ }
}
for i in d.num_digits..Decimal::MAX_DIGITS_WITHOUT_OVERFLOW {
d.digits[i] = 0;
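
The `v - 0x3030_3030_3030_3030` step above converts eight ASCII digits to eight binary digit values in a single subtraction (b'0' == 0x30), which is safe because `is_8digits` has already ruled out any byte that could borrow:

    fn main() {
        let v = u64::from_le_bytes(*b"12345678");
        // Strip the ASCII '0' bias from all eight bytes at once.
        let digits = (v - 0x3030_3030_3030_3030).to_le_bytes();
        assert_eq!(digits, [1, 2, 3, 4, 5, 6, 7, 8]);
    }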
diff --git a/library/core/src/num/dec2flt/float.rs b/library/core/src/num/dec2flt/float.rs
index 5921c5ed4..1c9d68999 100644
--- a/library/core/src/num/dec2flt/float.rs
+++ b/library/core/src/num/dec2flt/float.rs
@@ -118,11 +118,13 @@ impl RawFloat for f32 {
const SMALLEST_POWER_OF_TEN: i32 = -65;
const LARGEST_POWER_OF_TEN: i32 = 38;
+ #[inline]
fn from_u64(v: u64) -> Self {
debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
v as _
}
+ #[inline]
fn from_u64_bits(v: u64) -> Self {
f32::from_bits((v & 0xFFFFFFFF) as u32)
}
@@ -169,11 +171,13 @@ impl RawFloat for f64 {
const SMALLEST_POWER_OF_TEN: i32 = -342;
const LARGEST_POWER_OF_TEN: i32 = 308;
+ #[inline]
fn from_u64(v: u64) -> Self {
debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
v as _
}
+ #[inline]
fn from_u64_bits(v: u64) -> Self {
f64::from_bits(v)
}
diff --git a/library/core/src/num/dec2flt/lemire.rs b/library/core/src/num/dec2flt/lemire.rs
index 9f7594460..3bc052df7 100644
--- a/library/core/src/num/dec2flt/lemire.rs
+++ b/library/core/src/num/dec2flt/lemire.rs
@@ -118,10 +118,12 @@ pub fn compute_float<F: RawFloat>(q: i64, mut w: u64) -> BiasedFp {
/// This uses a pre-computed integer approximation for
/// log2(10), where 217706 / 2^16 is accurate for the
/// entire range of non-finite decimal exponents.
+#[inline]
fn power(q: i32) -> i32 {
(q.wrapping_mul(152_170 + 65536) >> 16) + 63
}
+#[inline]
fn full_multiplication(a: u64, b: u64) -> (u64, u64) {
let r = (a as u128) * (b as u128);
(r as u64, (r >> 64) as u64)
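
A quick check of the `power` approximation: 217706 / 2^16 ≈ 3.321930 against log2(10) ≈ 3.321928, agreeing with floor(q · log2(10)) + 63 across the decimal-exponent range dec2flt uses (the ±350 bound below comfortably covers SMALLEST/LARGEST_POWER_OF_TEN):

    fn power(q: i32) -> i32 {
        // Arithmetic right shift by 16 is floor division by 2^16.
        (q.wrapping_mul(152_170 + 65536) >> 16) + 63
    }

    fn main() {
        for q in -350..=350 {
            let exact = ((q as f64) * 10f64.log2()).floor() as i32 + 63;
            assert_eq!(power(q), exact);
        }
    }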
diff --git a/library/core/src/num/dec2flt/mod.rs b/library/core/src/num/dec2flt/mod.rs
index f8d493e8b..a4bc8b1c9 100644
--- a/library/core/src/num/dec2flt/mod.rs
+++ b/library/core/src/num/dec2flt/mod.rs
@@ -79,7 +79,7 @@ use crate::error::Error;
use crate::fmt;
use crate::str::FromStr;
-use self::common::{BiasedFp, ByteSlice};
+use self::common::BiasedFp;
use self::float::RawFloat;
use self::lemire::compute_float;
use self::parse::{parse_inf_nan, parse_number};
@@ -147,7 +147,13 @@ macro_rules! from_str_float_impl {
/// representable floating-point number to the number represented
/// by `src` (following the same rules for rounding as for the
/// results of primitive operations).
- #[inline]
+ // We add the `#[inline(never)]` attribute, since its body will
+ // be filled with that of `dec2flt`, which is `#[inline(always)]`.
+ // Since `dec2flt` is generic, a normal inline attribute on this
+ // function, with `dec2flt` itself unannotated, would result in
+ // `dec2flt` being instantiated repeatedly, despite the fact that
+ // at most two instances can ever exist. `#[inline(never)]` avoids this.
+ #[inline(never)]
fn from_str(src: &str) -> Result<Self, ParseFloatError> {
dec2flt(src)
}
@@ -202,12 +208,14 @@ impl fmt::Display for ParseFloatError {
}
}
+#[inline]
pub(super) fn pfe_empty() -> ParseFloatError {
ParseFloatError { kind: FloatErrorKind::Empty }
}
// Used in unit tests, keep public.
// This is much better than making FloatErrorKind and ParseFloatError::kind public.
+#[inline]
pub fn pfe_invalid() -> ParseFloatError {
ParseFloatError { kind: FloatErrorKind::Invalid }
}
@@ -220,6 +228,7 @@ fn biased_fp_to_float<T: RawFloat>(x: BiasedFp) -> T {
}
/// Converts a decimal string into a floating point number.
+#[inline(always)] // Will be inlined into a function with `#[inline(never)]`, see above
pub fn dec2flt<F: RawFloat>(s: &str) -> Result<F, ParseFloatError> {
let mut s = s.as_bytes();
let c = if let Some(&c) = s.first() {
@@ -229,17 +238,18 @@ pub fn dec2flt<F: RawFloat>(s: &str) -> Result<F, ParseFloatError> {
};
let negative = c == b'-';
if c == b'-' || c == b'+' {
- s = s.advance(1);
+ s = &s[1..];
}
if s.is_empty() {
return Err(pfe_invalid());
}
- let num = match parse_number(s, negative) {
+ let mut num = match parse_number(s) {
Some(r) => r,
None if let Some(value) = parse_inf_nan(s, negative) => return Ok(value),
None => return Err(pfe_invalid()),
};
+ num.negative = negative;
if let Some(value) = num.try_fast_path::<F>() {
return Ok(value);
}
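
The attribute pairing above generalizes to a pattern: a generic worker marked `#[inline(always)]` collapses into thin, non-generic `#[inline(never)]` wrappers, so the worker's body is emitted once per concrete type rather than once per call site. A hedged sketch with hypothetical names:

    // Generic worker: always folded into its (few) non-generic callers.
    #[inline(always)]
    fn parse_generic<T: From<u8>>(bytes: &[u8]) -> Option<T> {
        bytes.first().map(|&b| T::from(b))
    }

    // Thin wrappers: the only instantiation points, kept out of line.
    #[inline(never)]
    fn parse_u32(bytes: &[u8]) -> Option<u32> {
        parse_generic(bytes)
    }

    #[inline(never)]
    fn parse_u64(bytes: &[u8]) -> Option<u64> {
        parse_generic(bytes)
    }

    fn main() {
        assert_eq!(parse_u32(b"A"), Some(65));
        assert_eq!(parse_u64(b"A"), Some(65));
    }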
diff --git a/library/core/src/num/dec2flt/number.rs b/library/core/src/num/dec2flt/number.rs
index 405f7e7b6..8589e2bbd 100644
--- a/library/core/src/num/dec2flt/number.rs
+++ b/library/core/src/num/dec2flt/number.rs
@@ -33,6 +33,7 @@ pub struct Number {
impl Number {
/// Detect if the float can be accurately reconstructed from native floats.
+ #[inline]
fn is_fast_path<F: RawFloat>(&self) -> bool {
F::MIN_EXPONENT_FAST_PATH <= self.exponent
&& self.exponent <= F::MAX_EXPONENT_DISGUISED_FAST_PATH
diff --git a/library/core/src/num/dec2flt/parse.rs b/library/core/src/num/dec2flt/parse.rs
index 1a90e0d20..b0a23835c 100644
--- a/library/core/src/num/dec2flt/parse.rs
+++ b/library/core/src/num/dec2flt/parse.rs
@@ -1,6 +1,6 @@
//! Functions to parse floating-point numbers.
-use crate::num::dec2flt::common::{is_8digits, AsciiStr, ByteSlice};
+use crate::num::dec2flt::common::{is_8digits, ByteSlice};
use crate::num::dec2flt::float::RawFloat;
use crate::num::dec2flt::number::Number;
@@ -26,24 +26,39 @@ fn parse_8digits(mut v: u64) -> u64 {
}
/// Parse digits until a non-digit character is found.
-fn try_parse_digits(s: &mut AsciiStr<'_>, x: &mut u64) {
+fn try_parse_digits(mut s: &[u8], mut x: u64) -> (&[u8], u64) {
// may cause overflows, to be handled later
- s.parse_digits(|digit| {
- *x = x.wrapping_mul(10).wrapping_add(digit as _);
+
+ while s.len() >= 8 {
+ let num = s.read_u64();
+ if is_8digits(num) {
+ x = x.wrapping_mul(1_0000_0000).wrapping_add(parse_8digits(num));
+ s = &s[8..];
+ } else {
+ break;
+ }
+ }
+
+ s = s.parse_digits(|digit| {
+ x = x.wrapping_mul(10).wrapping_add(digit as _);
});
+
+ (s, x)
}
/// Parse up to 19 digits (the max that can be stored in a 64-bit integer).
-fn try_parse_19digits(s: &mut AsciiStr<'_>, x: &mut u64) {
+fn try_parse_19digits(s_ref: &mut &[u8], x: &mut u64) {
+ let mut s = *s_ref;
+
while *x < MIN_19DIGIT_INT {
- if let Some(&c) = s.as_ref().first() {
+ // FIXME: Can't use s.split_first() here yet,
+ // see https://github.com/rust-lang/rust/issues/109328
+ if let [c, s_next @ ..] = s {
let digit = c.wrapping_sub(b'0');
+
if digit < 10 {
*x = (*x * 10) + digit as u64; // no overflows here
- // SAFETY: cannot be empty
- unsafe {
- s.step();
- }
+ s = s_next;
} else {
break;
}
@@ -51,46 +66,26 @@ fn try_parse_19digits(s: &mut AsciiStr<'_>, x: &mut u64) {
break;
}
}
-}
-/// Try to parse 8 digits at a time, using an optimized algorithm.
-fn try_parse_8digits(s: &mut AsciiStr<'_>, x: &mut u64) {
- // may cause overflows, to be handled later
- if let Some(v) = s.read_u64() {
- if is_8digits(v) {
- *x = x.wrapping_mul(1_0000_0000).wrapping_add(parse_8digits(v));
- // SAFETY: already ensured the buffer was >= 8 bytes in read_u64.
- unsafe {
- s.step_by(8);
- }
- if let Some(v) = s.read_u64() {
- if is_8digits(v) {
- *x = x.wrapping_mul(1_0000_0000).wrapping_add(parse_8digits(v));
- // SAFETY: already ensured the buffer was >= 8 bytes in try_read_u64.
- unsafe {
- s.step_by(8);
- }
- }
- }
- }
- }
+ *s_ref = s;
}
/// Parse the scientific notation component of a float.
-fn parse_scientific(s: &mut AsciiStr<'_>) -> Option<i64> {
- let mut exponent = 0_i64;
+fn parse_scientific(s_ref: &mut &[u8]) -> Option<i64> {
+ let mut exponent = 0i64;
let mut negative = false;
- if let Some(&c) = s.as_ref().get(0) {
+
+ let mut s = *s_ref;
+
+ if let Some((&c, s_next)) = s.split_first() {
negative = c == b'-';
if c == b'-' || c == b'+' {
- // SAFETY: s cannot be empty
- unsafe {
- s.step();
- }
+ s = s_next;
}
}
- if s.first_isdigit() {
- s.parse_digits(|digit| {
+
+ if matches!(s.first(), Some(&x) if x.is_ascii_digit()) {
+ *s_ref = s.parse_digits(|digit| {
// no overflows here, saturate well before overflow
if exponent < 0x10000 {
exponent = 10 * exponent + digit as i64;
@@ -98,6 +93,7 @@ fn parse_scientific(s: &mut AsciiStr<'_>) -> Option<i64> {
});
if negative { Some(-exponent) } else { Some(exponent) }
} else {
+ *s_ref = s;
None
}
}
@@ -106,28 +102,29 @@ fn parse_scientific(s: &mut AsciiStr<'_>) -> Option<i64> {
///
/// This creates a representation of the float as the
/// significant digits and the decimal exponent.
-fn parse_partial_number(s: &[u8], negative: bool) -> Option<(Number, usize)> {
- let mut s = AsciiStr::new(s);
- let start = s;
+fn parse_partial_number(mut s: &[u8]) -> Option<(Number, usize)> {
debug_assert!(!s.is_empty());
// parse initial digits before dot
let mut mantissa = 0_u64;
- let digits_start = s;
- try_parse_digits(&mut s, &mut mantissa);
- let mut n_digits = s.offset_from(&digits_start);
+ let start = s;
+ let tmp = try_parse_digits(s, mantissa);
+ s = tmp.0;
+ mantissa = tmp.1;
+ let mut n_digits = s.offset_from(start);
// handle dot with the following digits
let mut n_after_dot = 0;
let mut exponent = 0_i64;
let int_end = s;
- if s.first_is(b'.') {
- // SAFETY: s cannot be empty due to first_is
- unsafe { s.step() };
+
+ if let Some((&b'.', s_next)) = s.split_first() {
+ s = s_next;
let before = s;
- try_parse_8digits(&mut s, &mut mantissa);
- try_parse_digits(&mut s, &mut mantissa);
- n_after_dot = s.offset_from(&before);
+ let tmp = try_parse_digits(s, mantissa);
+ s = tmp.0;
+ mantissa = tmp.1;
+ n_after_dot = s.offset_from(before);
exponent = -n_after_dot as i64;
}
@@ -138,65 +135,60 @@ fn parse_partial_number(s: &[u8], negative: bool) -> Option<(Number, usize)> {
// handle scientific format
let mut exp_number = 0_i64;
- if s.first_is2(b'e', b'E') {
- // SAFETY: s cannot be empty
- unsafe {
- s.step();
+ if let Some((&c, s_next)) = s.split_first() {
+ if c == b'e' || c == b'E' {
+ s = s_next;
+ // If None, we have no trailing digits after exponent, or an invalid float.
+ exp_number = parse_scientific(&mut s)?;
+ exponent += exp_number;
}
- // If None, we have no trailing digits after exponent, or an invalid float.
- exp_number = parse_scientific(&mut s)?;
- exponent += exp_number;
}
- let len = s.offset_from(&start) as _;
+ let len = s.offset_from(start) as _;
// handle uncommon case with many digits
if n_digits <= 19 {
- return Some((Number { exponent, mantissa, negative, many_digits: false }, len));
+ return Some((Number { exponent, mantissa, negative: false, many_digits: false }, len));
}
n_digits -= 19;
let mut many_digits = false;
- let mut p = digits_start;
- while p.first_is2(b'0', b'.') {
- // SAFETY: p cannot be empty due to first_is2
- unsafe {
- // '0' = b'.' + 2
- n_digits -= p.first_unchecked().saturating_sub(b'0' - 1) as isize;
- p.step();
+ let mut p = start;
+ while let Some((&c, p_next)) = p.split_first() {
+ if c == b'.' || c == b'0' {
+ n_digits -= c.saturating_sub(b'0' - 1) as isize;
+ p = p_next;
+ } else {
+ break;
}
}
if n_digits > 0 {
// at this point we have more than 19 significant digits, let's try again
many_digits = true;
mantissa = 0;
- let mut s = digits_start;
+ let mut s = start;
try_parse_19digits(&mut s, &mut mantissa);
exponent = if mantissa >= MIN_19DIGIT_INT {
// big int
- int_end.offset_from(&s)
+ int_end.offset_from(s)
} else {
- // SAFETY: the next byte must be present and be '.'
- // We know this is true because we had more than 19
- // digits previously, so we overflowed a 64-bit integer,
- // but parsing only the integral digits produced less
- // than 19 digits. That means we must have a decimal
- // point, and at least 1 fractional digit.
- unsafe { s.step() };
+ s = &s[1..];
let before = s;
try_parse_19digits(&mut s, &mut mantissa);
- -s.offset_from(&before)
+ -s.offset_from(before)
} as i64;
// add back the explicit part
exponent += exp_number;
}
- Some((Number { exponent, mantissa, negative, many_digits }, len))
+ Some((Number { exponent, mantissa, negative: false, many_digits }, len))
}
-/// Try to parse a non-special floating point number.
-pub fn parse_number(s: &[u8], negative: bool) -> Option<Number> {
- if let Some((float, rest)) = parse_partial_number(s, negative) {
+/// Try to parse a non-special floating point number.
+pub fn parse_number(s: &[u8]) -> Option<Number> {
+ if let Some((float, rest)) = parse_partial_number(s) {
if rest == s.len() {
return Some(float);
}
@@ -204,30 +196,48 @@ pub fn parse_number(s: &[u8], negative: bool) -> Option<Number> {
None
}
-/// Parse a partial representation of a special, non-finite float.
-fn parse_partial_inf_nan<F: RawFloat>(s: &[u8]) -> Option<(F, usize)> {
- fn parse_inf_rest(s: &[u8]) -> usize {
- if s.len() >= 8 && s[3..].as_ref().starts_with_ignore_case(b"inity") { 8 } else { 3 }
- }
- if s.len() >= 3 {
- if s.starts_with_ignore_case(b"nan") {
- return Some((F::NAN, 3));
- } else if s.starts_with_ignore_case(b"inf") {
- return Some((F::INFINITY, parse_inf_rest(s)));
- }
- }
- None
-}
-
/// Try to parse a special, non-finite float.
-pub fn parse_inf_nan<F: RawFloat>(s: &[u8], negative: bool) -> Option<F> {
- if let Some((mut float, rest)) = parse_partial_inf_nan::<F>(s) {
- if rest == s.len() {
- if negative {
- float = -float;
- }
- return Some(float);
- }
+pub(crate) fn parse_inf_nan<F: RawFloat>(s: &[u8], negative: bool) -> Option<F> {
+ // Since a valid string has a length of at most 8, we can load
+ // all relevant characters into a u64 and work from there.
+ // This also generates much better code.
+
+ let mut register;
+ let len: usize;
+
+ // All valid strings are either of length 8 or 3.
+ if s.len() == 8 {
+ register = s.read_u64();
+ len = 8;
+ } else if s.len() == 3 {
+ let a = s[0] as u64;
+ let b = s[1] as u64;
+ let c = s[2] as u64;
+ register = (c << 16) | (b << 8) | a;
+ len = 3;
+ } else {
+ return None;
}
- None
+
+ // Clear out the bits which turn ASCII uppercase characters into
+ // lowercase characters. The resulting string is all uppercase.
+ // What happens to other characters is irrelevant.
+ register &= 0xDFDFDFDFDFDFDFDF;
+
+ // u64 values corresponding to relevant cases
+ const INF_3: u64 = 0x464E49; // "INF"
+ const INF_8: u64 = 0x5954494E49464E49; // "INFINITY"
+ const NAN: u64 = 0x4E414E; // "NAN"
+
+ // Match register value to constant to parse string.
+ // Also match on the string length to catch edge cases
+ // like "inf\0\0\0\0\0".
+ let float = match (register, len) {
+ (INF_3, 3) => F::INFINITY,
+ (INF_8, 8) => F::INFINITY,
+ (NAN, 3) => F::NAN,
+ _ => return None,
+ };
+
+ if negative { Some(-float) } else { Some(float) }
}
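
For illustration, a standalone sketch of the case-folding trick above; the
`load3`/`load8` helpers are hypothetical stand-ins for `read_u64` and the
manual 3-byte load in the patch:

    fn load3(s: &[u8]) -> u64 {
        (s[2] as u64) << 16 | (s[1] as u64) << 8 | s[0] as u64
    }

    fn load8(s: &[u8]) -> u64 {
        u64::from_le_bytes(s.try_into().unwrap())
    }

    fn classify(s: &[u8]) -> Option<&'static str> {
        const INF_3: u64 = 0x464E49; // "INF"
        const INF_8: u64 = 0x5954494E49464E49; // "INFINITY"
        const NAN: u64 = 0x4E414E; // "NAN"
        let (register, len) = match s.len() {
            3 => (load3(s), 3),
            8 => (load8(s), 8),
            _ => return None,
        };
        // Clearing bit 5 folds lowercase ASCII letters to uppercase.
        match (register & 0xDFDF_DFDF_DFDF_DFDF, len) {
            (INF_3, 3) | (INF_8, 8) => Some("inf"),
            (NAN, 3) => Some("nan"),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(classify(b"iNf"), Some("inf"));
        assert_eq!(classify(b"Infinity"), Some("inf"));
        assert_eq!(classify(b"NaN"), Some("nan"));
        assert_eq!(classify(b"nope"), None);
    }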
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index 1308b0770..1c6819b54 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -1391,7 +1391,7 @@ impl f32 {
#[stable(feature = "clamp", since = "1.50.0")]
#[inline]
pub fn clamp(mut self, min: f32, max: f32) -> f32 {
- assert!(min <= max);
+ assert!(min <= max, "min > max, or either was NaN. min = {min:?}, max = {max:?}");
if self < min {
self = min;
}
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index 2a22c4302..1e7387217 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -1389,7 +1389,7 @@ impl f64 {
#[stable(feature = "clamp", since = "1.50.0")]
#[inline]
pub fn clamp(mut self, min: f64, max: f64) -> f64 {
- assert!(min <= max);
+ assert!(min <= max, "min > max, or either was NaN. min = {min:?}, max = {max:?}");
if self < min {
self = min;
}
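
A quick sketch of what the improved message buys on a bad call (behavior per
the assert above, shown for `f64`; `f32` is identical):

    fn main() {
        assert_eq!(1.0_f64.clamp(0.0, 2.0), 1.0);
        // Now panics with "min > max, or either was NaN. min = 2.0, max = 1.0"
        // rather than a bare "assertion failed: min <= max".
        let caught = std::panic::catch_unwind(|| 1.5_f64.clamp(2.0, 1.0));
        assert!(caught.is_err());
    }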
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index a50c91579..9b812bbfc 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -414,7 +414,7 @@ impl isize {
}
}
-/// If 6th bit set ascii is upper case.
+/// If the 6th bit is set, ASCII is lower case.
const ASCII_CASE_MASK: u8 = 0b0010_0000;
impl u8 {
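
The corrected comment in action; a small sketch of how the mask folds case:

    fn main() {
        const ASCII_CASE_MASK: u8 = 0b0010_0000;
        assert_ne!(b'a' & ASCII_CASE_MASK, 0); // lower case has the bit set
        assert_eq!(b'a' & !ASCII_CASE_MASK, b'A'); // clearing it uppercases
        assert_eq!(b'A' | ASCII_CASE_MASK, b'a'); // setting it lowercases
    }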
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index fbda8f82b..49d23abee 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -34,6 +34,13 @@ macro_rules! nonzero_integers {
/// use std::mem::size_of;
#[doc = concat!("assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int), ">());")]
/// ```
+ ///
+ /// # Layout
+ ///
+ #[doc = concat!("`", stringify!($Ty), "` is guaranteed to have the same layout and bit validity as `", stringify!($Int), "`")]
+ /// with the exception that `0` is not a valid instance.
+ #[doc = concat!("`Option<", stringify!($Ty), ">` is guaranteed to be compatible with `", stringify!($Int), "`,")]
+ /// including in FFI.
#[$stability]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
@@ -1147,12 +1154,10 @@ macro_rules! nonzero_min_max_unsigned {
/// # Examples
///
/// ```
- /// #![feature(nonzero_min_max)]
- ///
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), 1", stringify!($Int), ");")]
/// ```
- #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ #[stable(feature = "nonzero_min_max", since = "1.70.0")]
pub const MIN: Self = Self::new(1).unwrap();
/// The largest value that can be represented by this non-zero
@@ -1162,12 +1167,10 @@ macro_rules! nonzero_min_max_unsigned {
/// # Examples
///
/// ```
- /// #![feature(nonzero_min_max)]
- ///
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
/// ```
- #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ #[stable(feature = "nonzero_min_max", since = "1.70.0")]
pub const MAX: Self = Self::new(<$Int>::MAX).unwrap();
}
)+
@@ -1189,12 +1192,10 @@ macro_rules! nonzero_min_max_signed {
/// # Examples
///
/// ```
- /// #![feature(nonzero_min_max)]
- ///
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), ", stringify!($Int), "::MIN);")]
/// ```
- #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ #[stable(feature = "nonzero_min_max", since = "1.70.0")]
pub const MIN: Self = Self::new(<$Int>::MIN).unwrap();
/// The largest value that can be represented by this non-zero
@@ -1208,12 +1209,10 @@ macro_rules! nonzero_min_max_signed {
/// # Examples
///
/// ```
- /// #![feature(nonzero_min_max)]
- ///
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
/// ```
- #[unstable(feature = "nonzero_min_max", issue = "89065")]
+ #[stable(feature = "nonzero_min_max", since = "1.70.0")]
pub const MAX: Self = Self::new(<$Int>::MAX).unwrap();
}
)+
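
With `nonzero_min_max` stabilized, the constants are usable without a feature
gate; a short sketch of the asymmetry between unsigned and signed:

    use std::num::{NonZeroI8, NonZeroU8};

    fn main() {
        assert_eq!(NonZeroU8::MIN.get(), 1); // unsigned MIN is 1, not 0
        assert_eq!(NonZeroU8::MAX.get(), u8::MAX);
        assert_eq!(NonZeroI8::MIN.get(), i8::MIN); // signed range is unchanged
        assert_eq!(NonZeroI8::MAX.get(), i8::MAX);
    }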
diff --git a/library/core/src/num/shells/u16.rs b/library/core/src/num/shells/u16.rs
index b203806f4..7394977e5 100644
--- a/library/core/src/num/shells/u16.rs
+++ b/library/core/src/num/shells/u16.rs
@@ -1,4 +1,4 @@
-//! Redundant constants module for the [`i16` primitive type][i16].
+//! Redundant constants module for the [`u16` primitive type][u16].
//!
//! New code should use the associated constants directly on the primitive type.
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 932038a0b..114deeea3 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -1363,12 +1363,11 @@ macro_rules! uint_impl {
///
/// Basic usage:
///
- /// Please note that this example is shared between integer types.
- /// Which explains why `i8` is used here.
- ///
/// ```
- /// assert_eq!(100i8.wrapping_neg(), -100);
- /// assert_eq!((-128i8).wrapping_neg(), -128);
+ #[doc = concat!("assert_eq!(0_", stringify!($SelfT), ".wrapping_neg(), 0);")]
+ #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_neg(), 1);")]
+ #[doc = concat!("assert_eq!(13_", stringify!($SelfT), ".wrapping_neg(), (!13) + 1);")]
+ #[doc = concat!("assert_eq!(42_", stringify!($SelfT), ".wrapping_neg(), !(42 - 1));")]
/// ```
#[stable(feature = "num_wrapping", since = "1.2.0")]
#[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
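
The new per-type examples reduce to the two's-complement identity
`x.wrapping_neg() == !x + 1`; a sketch with one concrete type:

    fn main() {
        assert_eq!(0_u16.wrapping_neg(), 0);
        assert_eq!(u16::MAX.wrapping_neg(), 1);
        assert_eq!(13_u16.wrapping_neg(), !13 + 1);
        assert_eq!(42_u16.wrapping_neg(), !(42 - 1));
    }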
diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs
index 3e06776d2..265022a39 100644
--- a/library/core/src/ops/index_range.rs
+++ b/library/core/src/ops/index_range.rs
@@ -1,5 +1,6 @@
use crate::intrinsics::{assert_unsafe_precondition, unchecked_add, unchecked_sub};
use crate::iter::{FusedIterator, TrustedLen};
+use crate::num::NonZeroUsize;
/// Like a `Range<usize>`, but with a safety invariant that `start <= end`.
///
@@ -132,10 +133,9 @@ impl Iterator for IndexRange {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let original_len = self.len();
- self.take_prefix(n);
- if n > original_len { Err(original_len) } else { Ok(()) }
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let taken = self.take_prefix(n);
+ NonZeroUsize::new(n - taken.len()).map_or(Ok(()), Err)
}
}
@@ -151,10 +151,9 @@ impl DoubleEndedIterator for IndexRange {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let original_len = self.len();
- self.take_suffix(n);
- if n > original_len { Err(original_len) } else { Ok(()) }
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let taken = self.take_suffix(n);
+ NonZeroUsize::new(n - taken.len()).map_or(Ok(()), Err)
}
}
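
A sketch of the new `advance_by` contract (nightly, `iter_advance_by`): on
failure the error now carries the number of steps that could *not* be taken,
as a `NonZeroUsize`, instead of the number that were:

    #![feature(iter_advance_by)]
    use std::num::NonZeroUsize;

    fn main() {
        let mut it = [1, 2, 3].into_iter();
        assert_eq!(it.advance_by(2), Ok(())); // advanced in full
        let mut it = [1, 2, 3].into_iter();
        // 3 of the requested 5 steps succeed; 2 are left over.
        assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(2).unwrap()));
    }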
diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs
index 86aa1e4fd..c254803fb 100644
--- a/library/core/src/ops/try_trait.rs
+++ b/library/core/src/ops/try_trait.rs
@@ -392,14 +392,7 @@ impl<T> NeverShortCircuit<T> {
pub fn wrap_mut_2<A, B>(
mut f: impl ~const FnMut(A, B) -> T,
) -> impl ~const FnMut(A, B) -> Self {
- cfg_if! {
- if #[cfg(bootstrap)] {
- #[allow(unused_parens)]
- (const move |a, b| NeverShortCircuit(f(a, b)))
- } else {
- const move |a, b| NeverShortCircuit(f(a, b))
- }
- }
+ const move |a, b| NeverShortCircuit(f(a, b))
}
}
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index 994c08d1f..6f7bc6ed2 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -559,6 +559,7 @@ use crate::{
/// The `Option` type. See [the module level documentation](self) for more.
#[derive(Copy, PartialOrd, Eq, Ord, Debug, Hash)]
#[rustc_diagnostic_item = "Option"]
+#[cfg_attr(not(bootstrap), lang = "Option")]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
/// No value.
@@ -604,8 +605,6 @@ impl<T> Option<T> {
/// # Examples
///
/// ```
- /// #![feature(is_some_and)]
- ///
/// let x: Option<u32> = Some(2);
/// assert_eq!(x.is_some_and(|x| x > 1), true);
///
@@ -617,7 +616,7 @@ impl<T> Option<T> {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "is_some_and", issue = "93050")]
+ #[stable(feature = "is_some_and", since = "1.70.0")]
pub fn is_some_and(self, f: impl FnOnce(T) -> bool) -> bool {
match self {
None => false,
@@ -735,23 +734,6 @@ impl<T> Option<T> {
}
}
- const fn get_some_offset() -> isize {
- if mem::size_of::<Option<T>>() == mem::size_of::<T>() {
- // niche optimization means the `T` is always stored at the same position as the Option.
- 0
- } else {
- assert!(mem::size_of::<Option<T>>() == mem::size_of::<Option<mem::MaybeUninit<T>>>());
- let some_uninit = Some(mem::MaybeUninit::<T>::uninit());
- // SAFETY: This gets the byte offset of the `Some(_)` value following the fact that
- // niche optimization is not active, and thus Option<T> and Option<MaybeUninit<t>> share
- // the same layout.
- unsafe {
- (some_uninit.as_ref().unwrap() as *const mem::MaybeUninit<T>)
- .byte_offset_from(&some_uninit as *const Option<mem::MaybeUninit<T>>)
- }
- }
- }
-
/// Returns a slice of the contained value, if any. If this is `None`, an
/// empty slice is returned. This can be useful to have a single type of
/// iterator over an `Option` or slice.
@@ -784,16 +766,27 @@ impl<T> Option<T> {
#[must_use]
#[unstable(feature = "option_as_slice", issue = "108545")]
pub fn as_slice(&self) -> &[T] {
- // SAFETY: This is sound as long as `get_some_offset` returns the
- // correct offset. Though in the `None` case, the slice may be located
- // at a pointer pointing into padding, the fact that the slice is
- // empty, and the padding is at a properly aligned position for a
- // value of that type makes it sound.
+ #[cfg(bootstrap)]
+ match self {
+ Some(value) => slice::from_ref(value),
+ None => &[],
+ }
+
+ #[cfg(not(bootstrap))]
+ // SAFETY: When the `Option` is `Some`, we're using the actual pointer
+ // to the payload, with a length of 1, so this is equivalent to
+ // `slice::from_ref`, and thus is safe.
+ // When the `Option` is `None`, the length used is 0, so to be safe it
+ // just needs to be aligned, which it is because `&self` is aligned and
+ // the offset used is a multiple of alignment.
+ //
+ // In the new version, the intrinsic always returns a pointer to an
+ // in-bounds and correctly aligned position for a `T` (even if in the
+ // `None` case it's just padding).
unsafe {
slice::from_raw_parts(
- (self as *const Option<T>).wrapping_byte_offset(Self::get_some_offset())
- as *const T,
- self.is_some() as usize,
+ crate::intrinsics::option_payload_ptr(crate::ptr::from_ref(self)),
+ usize::from(self.is_some()),
)
}
}
@@ -840,15 +833,30 @@ impl<T> Option<T> {
#[must_use]
#[unstable(feature = "option_as_slice", issue = "108545")]
pub fn as_mut_slice(&mut self) -> &mut [T] {
- // SAFETY: This is sound as long as `get_some_offset` returns the
- // correct offset. Though in the `None` case, the slice may be located
- // at a pointer pointing into padding, the fact that the slice is
- // empty, and the padding is at a properly aligned position for a
- // value of that type makes it sound.
+ #[cfg(bootstrap)]
+ match self {
+ Some(value) => slice::from_mut(value),
+ None => &mut [],
+ }
+
+ #[cfg(not(bootstrap))]
+ // SAFETY: When the `Option` is `Some`, we're using the actual pointer
+ // to the payload, with a length of 1, so this is equivalent to
+ // `slice::from_mut`, and thus is safe.
+ // When the `Option` is `None`, the length used is 0, so to be safe it
+ // just needs to be aligned, which it is because `&self` is aligned and
+ // the offset used is a multiple of alignment.
+ //
+ // In the new version, the intrinsic creates a `*const T` from a
+ // mutable reference so it is safe to cast back to a mutable pointer
+ // here. As with `as_slice`, the intrinsic always returns a pointer to
+ // an in-bounds and correctly aligned position for a `T` (even if in
+ // the `None` case it's just padding).
unsafe {
slice::from_raw_parts_mut(
- (self as *mut Option<T>).wrapping_byte_offset(Self::get_some_offset()) as *mut T,
- self.is_some() as usize,
+ crate::intrinsics::option_payload_ptr(crate::ptr::from_mut(self).cast_const())
+ .cast_mut(),
+ usize::from(self.is_some()),
)
}
}
@@ -1264,6 +1272,7 @@ impl<T> Option<T> {
/// let x: Option<String> = None;
/// assert_eq!(x.as_deref(), None);
/// ```
+ #[inline]
#[stable(feature = "option_deref", since = "1.40.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
pub const fn as_deref(&self) -> Option<&T::Target>
@@ -1290,6 +1299,7 @@ impl<T> Option<T> {
/// x
/// }), Some("HEY".to_owned().as_mut_str()));
/// ```
+ #[inline]
#[stable(feature = "option_deref", since = "1.40.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
pub const fn as_deref_mut(&mut self) -> Option<&mut T::Target>
@@ -1768,36 +1778,6 @@ impl<T> Option<T> {
mem::replace(self, Some(value))
}
- /// Returns `true` if the option is a [`Some`] value containing the given value.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(option_result_contains)]
- ///
- /// let x: Option<u32> = Some(2);
- /// assert_eq!(x.contains(&2), true);
- ///
- /// let x: Option<u32> = Some(3);
- /// assert_eq!(x.contains(&2), false);
- ///
- /// let x: Option<u32> = None;
- /// assert_eq!(x.contains(&2), false);
- /// ```
- #[must_use]
- #[inline]
- #[unstable(feature = "option_result_contains", issue = "62358")]
- #[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
- pub const fn contains<U>(&self, x: &U) -> bool
- where
- U: ~const PartialEq<T>,
- {
- match self {
- Some(y) => x.eq(y),
- None => false,
- }
- }
-
/// Zips `self` with another `Option`.
///
/// If `self` is `Some(s)` and `other` is `Some(o)`, this method returns `Some((s, o))`.
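
A sketch of the migration for the removed `Option::contains`, alongside the
newly stabilized `is_some_and` and the still-gated `as_slice`:

    #![feature(option_as_slice)]

    fn main() {
        let x: Option<u32> = Some(2);
        assert!(x == Some(2)); // replaces x.contains(&2)
        assert!(x.is_some_and(|v| v > 1)); // stable since 1.70.0
        assert_eq!(x.as_slice(), &[2]);
        assert!(None::<u32>.as_slice().is_empty());
    }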
diff --git a/library/core/src/panic/panic_info.rs b/library/core/src/panic/panic_info.rs
index 0d385c9d1..06fbe083c 100644
--- a/library/core/src/panic/panic_info.rs
+++ b/library/core/src/panic/panic_info.rs
@@ -15,14 +15,10 @@ use crate::panic::Location;
/// use std::panic;
///
/// panic::set_hook(Box::new(|panic_info| {
-/// if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
-/// println!("panic occurred: {s:?}");
-/// } else {
-/// println!("panic occurred");
-/// }
+/// println!("panic occurred: {panic_info}");
/// }));
///
-/// panic!("Normal panic");
+/// panic!("critical system failure");
/// ```
#[lang = "panic_info"]
#[stable(feature = "panic_hooks", since = "1.10.0")]
diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
index 9a6153f12..7e7b6b4db 100644
--- a/library/core/src/panic/unwind_safe.rs
+++ b/library/core/src/panic/unwind_safe.rs
@@ -28,7 +28,7 @@ use crate::task::{Context, Poll};
/// 2. This broken invariant is then later observed.
///
/// Typically in Rust, it is difficult to perform step (2) because catching a
-/// panic involves either spawning a thread (which in turns makes it difficult
+/// panic involves either spawning a thread (which in turn makes it difficult
/// to later witness broken invariants) or using the `catch_unwind` function in this
/// module. Additionally, even if an invariant is witnessed, it typically isn't a
/// problem in Rust because there are no uninitialized values (like in C or C++).
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index 805a1e51a..efeb726ab 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -29,6 +29,9 @@
use crate::fmt;
use crate::panic::{Location, PanicInfo};
+#[cfg(feature = "panic_immediate_abort")]
+const _: () = assert!(cfg!(panic = "abort"), "panic_immediate_abort requires -C panic=abort");
+
// First we define the two main entry points that all panics go through.
// In the end both are just convenience wrappers around `panic_impl`.
@@ -111,7 +114,7 @@ pub const fn panic(expr: &'static str) -> ! {
// truncation and padding (even though none is used here). Using
// Arguments::new_v1 may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
- panic_fmt(fmt::Arguments::new_v1(&[expr], &[]));
+ panic_fmt(fmt::Arguments::new_const(&[expr]));
}
/// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize.
@@ -120,7 +123,7 @@ pub const fn panic(expr: &'static str) -> ! {
#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics
#[rustc_nounwind]
pub fn panic_nounwind(expr: &'static str) -> ! {
- panic_nounwind_fmt(fmt::Arguments::new_v1(&[expr], &[]));
+ panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]));
}
#[inline]
@@ -159,6 +162,20 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
panic!("index out of bounds: the len is {len} but the index is {index}")
}
+#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[track_caller]
+#[cfg_attr(not(bootstrap), lang = "panic_misaligned_pointer_dereference")] // needed by codegen for panic on misaligned pointer deref
+fn panic_misaligned_pointer_dereference(required: usize, found: usize) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ panic!(
+ "misaligned pointer dereference: address must be a multiple of {required:#x} but is {found:#x}"
+ )
+}
+
/// Panic because we cannot unwind out of a function.
///
/// This function is called directly by the codegen backend, and must not have
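
A sketch of what the new misaligned-dereference check guards against (the
dereference stays commented out because it is UB either way; with debug
assertions enabled, the new lang item turns it into the panic message above):

    fn main() {
        let data = [0u32; 2];
        let base = data.as_ptr() as *const u8;
        // One byte past a 4-aligned address is guaranteed misaligned for u32.
        let ptr = unsafe { base.add(1) } as *const u32;
        assert_eq!(ptr as usize % 4, 1);
        // let _ = unsafe { *ptr }; // would trip the new check
    }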
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index febe57dc9..c4b89a630 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -1003,22 +1003,25 @@ impl<P, U> CoerceUnsized<Pin<U>> for Pin<P> where P: CoerceUnsized<U> {}
#[stable(feature = "pin", since = "1.33.0")]
impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
-/// Constructs a <code>[Pin]<[&mut] T></code>, by pinning[^1] a `value: T` _locally_[^2].
+/// Constructs a <code>[Pin]<[&mut] T></code>, by pinning a `value: T` locally.
///
-/// Unlike [`Box::pin`], this does not involve a heap allocation.
+/// Unlike [`Box::pin`], this does not create a new heap allocation. As explained
+/// below, the element might still end up on the heap, however.
///
-/// [^1]: If the (type `T` of the) given value does not implement [`Unpin`], then this
-/// effectively pins the `value` in memory, where it will be unable to be moved.
-/// Otherwise, <code>[Pin]<[&mut] T></code> behaves like <code>[&mut] T</code>, and operations such
-/// as [`mem::replace()`][crate::mem::replace] will allow extracting that value, and therefore,
-/// moving it.
-/// See [the `Unpin` section of the `pin` module][self#unpin] for more info.
+/// The local pinning performed by this macro is usually dubbed "stack"-pinning.
+/// Outside of `async` contexts, locals do indeed get stored on the stack. In
+/// `async` functions or blocks, however, any locals crossing an `.await` point
+/// are part of the state captured by the `Future`, and will use its storage.
+/// That storage can live either on the heap or on the stack. "Local pinning"
+/// is therefore the more accurate term.
///
-/// [^2]: This is usually dubbed "stack"-pinning. And whilst local values are almost always located
-/// in the stack (_e.g._, when within the body of a non-`async` function), the truth is that inside
-/// the body of an `async fn` or block —more generally, the body of a generator— any locals crossing
-/// an `.await` point —a `yield` point— end up being part of the state captured by the `Future` —by
-/// the `Generator`—, and thus will be stored wherever that one is.
+/// If the type of the given value does not implement [`Unpin`], then this macro
+/// pins the value in memory in a way that prevents moves. On the other hand,
+/// if the type does implement [`Unpin`], <code>[Pin]<[&mut] T></code> behaves
+/// like <code>[&mut] T</code>, and operations such as
+/// [`mem::replace()`][crate::mem::replace] or [`mem::take()`](crate::mem::take)
+/// will allow moves of the value.
+/// See [the `Unpin` section of the `pin` module][self#unpin] for details.
///
/// ## Examples
///
@@ -1158,9 +1161,9 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
///
/// If you really need to return a pinned value, consider using [`Box::pin`] instead.
///
-/// On the other hand, pinning to the stack[<sup>2</sup>](#fn2) using [`pin!`] is likely to be
-/// cheaper than pinning into a fresh heap allocation using [`Box::pin`]. Moreover, by virtue of not
-/// even needing an allocator, [`pin!`] is the main non-`unsafe` `#![no_std]`-compatible [`Pin`]
+/// On the other hand, local pinning using [`pin!`] is likely to be cheaper than
+/// pinning into a fresh heap allocation using [`Box::pin`]. Moreover, by virtue of not
+/// requiring an allocator, [`pin!`] is the main non-`unsafe` `#![no_std]`-compatible [`Pin`]
/// constructor.
///
/// [`Box::pin`]: ../../std/boxed/struct.Box.html#method.pin
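
A minimal use of local pinning as now described; no allocator is involved:

    use std::pin::{pin, Pin};

    fn main() {
        let v: Pin<&mut i32> = pin!(5);
        assert_eq!(*v, 5);
    }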
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 6f78811a1..3df990e5d 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -1,7 +1,8 @@
// `library/{std,core}/src/primitive_docs.rs` should have the same contents.
// These are different files so that relative links work properly without
// having to have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
-#[doc(primitive = "bool")]
+#[cfg_attr(bootstrap, doc(primitive = "bool"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "bool")]
#[doc(alias = "true")]
#[doc(alias = "false")]
/// The boolean type.
@@ -63,7 +64,8 @@
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_bool {}
-#[doc(primitive = "never")]
+#[cfg_attr(bootstrap, doc(primitive = "never"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "never")]
#[doc(alias = "!")]
//
/// The `!` type, also called "never".
@@ -274,7 +276,8 @@ mod prim_bool {}
#[unstable(feature = "never_type", issue = "35121")]
mod prim_never {}
-#[doc(primitive = "char")]
+#[cfg_attr(bootstrap, doc(primitive = "char"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "char")]
#[allow(rustdoc::invalid_rust_codeblocks)]
/// A character type.
///
@@ -398,7 +401,8 @@ mod prim_never {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_char {}
-#[doc(primitive = "unit")]
+#[cfg_attr(bootstrap, doc(primitive = "unit"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "unit")]
#[doc(alias = "(")]
#[doc(alias = ")")]
#[doc(alias = "()")]
@@ -460,7 +464,8 @@ impl Copy for () {
// empty
}
-#[doc(primitive = "pointer")]
+#[cfg_attr(bootstrap, doc(primitive = "pointer"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "pointer")]
#[doc(alias = "ptr")]
#[doc(alias = "*")]
#[doc(alias = "*const")]
@@ -572,12 +577,12 @@ impl Copy for () {
/// [`is_null`]: pointer::is_null
/// [`offset`]: pointer::offset
#[doc = concat!("[`into_raw`]: ", include_str!("../primitive_docs/box_into_raw.md"))]
-/// [`drop`]: mem::drop
/// [`write`]: ptr::write
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_pointer {}
-#[doc(primitive = "array")]
+#[cfg_attr(bootstrap, doc(primitive = "array"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "array")]
#[doc(alias = "[]")]
#[doc(alias = "[T;N]")] // unfortunately, rustdoc doesn't have fuzzy search for aliases
#[doc(alias = "[T; N]")]
@@ -778,7 +783,8 @@ mod prim_pointer {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_array {}
-#[doc(primitive = "slice")]
+#[cfg_attr(bootstrap, doc(primitive = "slice"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "slice")]
#[doc(alias = "[")]
#[doc(alias = "]")]
#[doc(alias = "[]")]
@@ -870,7 +876,8 @@ mod prim_array {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_slice {}
-#[doc(primitive = "str")]
+#[cfg_attr(bootstrap, doc(primitive = "str"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "str")]
/// String slices.
///
/// *[See also the `std::str` module](crate::str).*
@@ -937,7 +944,8 @@ mod prim_slice {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_str {}
-#[doc(primitive = "tuple")]
+#[cfg_attr(bootstrap, doc(primitive = "tuple"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "tuple")]
#[doc(alias = "(")]
#[doc(alias = ")")]
#[doc(alias = "()")]
@@ -1017,7 +1025,6 @@ mod prim_str {}
/// * [`UnwindSafe`]
/// * [`RefUnwindSafe`]
///
-/// [`Unpin`]: marker::Unpin
/// [`UnwindSafe`]: panic::UnwindSafe
/// [`RefUnwindSafe`]: panic::RefUnwindSafe
///
@@ -1081,7 +1088,8 @@ impl<T: Copy> Copy for (T,) {
// empty
}
-#[doc(primitive = "f32")]
+#[cfg_attr(bootstrap, doc(primitive = "f32"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "f32")]
/// A 32-bit floating point type (specifically, the "binary32" type defined in IEEE 754-2008).
///
/// This type can represent a wide range of decimal numbers, like `3.5`, `27`,
@@ -1110,7 +1118,7 @@ impl<T: Copy> Copy for (T,) {
/// - [NaN (not a number)](#associatedconstant.NAN): this value results from
/// calculations like `(-1.0).sqrt()`. NaN has some potentially unexpected
/// behavior:
-/// - It is unequal to any float, including itself! This is the reason `f32`
+/// - It is not equal to any float, including itself! This is the reason `f32`
/// doesn't implement the `Eq` trait.
/// - It is also neither smaller nor greater than any float, making it
/// impossible to sort by the default comparison operation, which is the
@@ -1147,7 +1155,8 @@ impl<T: Copy> Copy for (T,) {
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_f32 {}
-#[doc(primitive = "f64")]
+#[cfg_attr(bootstrap, doc(primitive = "f64"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "f64")]
/// A 64-bit floating point type (specifically, the "binary64" type defined in IEEE 754-2008).
///
/// This type is very similar to [`f32`], but has increased
@@ -1162,67 +1171,78 @@ mod prim_f32 {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_f64 {}
-#[doc(primitive = "i8")]
+#[cfg_attr(bootstrap, doc(primitive = "i8"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i8")]
//
/// The 8-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i8 {}
-#[doc(primitive = "i16")]
+#[cfg_attr(bootstrap, doc(primitive = "i16"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i16")]
//
/// The 16-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i16 {}
-#[doc(primitive = "i32")]
+#[cfg_attr(bootstrap, doc(primitive = "i32"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i32")]
//
/// The 32-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i32 {}
-#[doc(primitive = "i64")]
+#[cfg_attr(bootstrap, doc(primitive = "i64"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i64")]
//
/// The 64-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i64 {}
-#[doc(primitive = "i128")]
+#[cfg_attr(bootstrap, doc(primitive = "i128"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i128")]
//
/// The 128-bit signed integer type.
#[stable(feature = "i128", since = "1.26.0")]
mod prim_i128 {}
-#[doc(primitive = "u8")]
+#[cfg_attr(bootstrap, doc(primitive = "u8"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u8")]
//
/// The 8-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u8 {}
-#[doc(primitive = "u16")]
+#[cfg_attr(bootstrap, doc(primitive = "u16"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u16")]
//
/// The 16-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u16 {}
-#[doc(primitive = "u32")]
+#[cfg_attr(bootstrap, doc(primitive = "u32"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u32")]
//
/// The 32-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u32 {}
-#[doc(primitive = "u64")]
+#[cfg_attr(bootstrap, doc(primitive = "u64"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u64")]
//
/// The 64-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u64 {}
-#[doc(primitive = "u128")]
+#[cfg_attr(bootstrap, doc(primitive = "u128"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u128")]
//
/// The 128-bit unsigned integer type.
#[stable(feature = "i128", since = "1.26.0")]
mod prim_u128 {}
-#[doc(primitive = "isize")]
+#[cfg_attr(bootstrap, doc(primitive = "isize"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "isize")]
//
/// The pointer-sized signed integer type.
///
@@ -1232,7 +1252,8 @@ mod prim_u128 {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_isize {}
-#[doc(primitive = "usize")]
+#[cfg_attr(bootstrap, doc(primitive = "usize"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "usize")]
//
/// The pointer-sized unsigned integer type.
///
@@ -1242,7 +1263,8 @@ mod prim_isize {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_usize {}
-#[doc(primitive = "reference")]
+#[cfg_attr(bootstrap, doc(primitive = "reference"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "reference")]
#[doc(alias = "&")]
#[doc(alias = "&mut")]
//
@@ -1338,6 +1360,7 @@ mod prim_usize {}
/// * [`Hash`]
/// * [`ToSocketAddrs`]
/// * [`Send`] \(`&T` references also require <code>T: [Sync]</code>)
+/// * [`Sync`]
///
/// [`std::fmt`]: fmt
/// [`Hash`]: hash::Hash
@@ -1373,16 +1396,13 @@ mod prim_usize {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_ref {}
-#[doc(primitive = "fn")]
+#[cfg_attr(bootstrap, doc(primitive = "fn"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "fn")]
//
/// Function pointers, like `fn(usize) -> bool`.
///
/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].*
///
-/// [`Fn`]: ops::Fn
-/// [`FnMut`]: ops::FnMut
-/// [`FnOnce`]: ops::FnOnce
-///
/// Function pointers are pointers that point to *code*, not data. They can be called
/// just like functions. Like references, function pointers are, among other things, assumed to
/// not be null, so if you want to pass a function pointer over FFI and be able to accommodate null
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 57e2ffe5d..839afc57f 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -61,14 +61,14 @@ impl<T: ?Sized> *const T {
/// Use the pointer value in a new pointer of another type.
///
- /// In case `val` is a (fat) pointer to an unsized type, this operation
+ /// In case `meta` is a (fat) pointer to an unsized type, this operation
/// will ignore the pointer part, whereas for (thin) pointers to sized
/// types, this has the same effect as a simple cast.
///
/// The resulting pointer will have provenance of `self`, i.e., for a fat
/// pointer, this operation is semantically the same as creating a new
/// fat pointer with the data pointer value of `self` but the metadata of
- /// `val`.
+ /// `meta`.
///
/// # Examples
///
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 1ad9af154..818f1a919 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -691,7 +691,7 @@ where
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
-pub fn from_ref<T: ?Sized>(r: &T) -> *const T {
+pub const fn from_ref<T: ?Sized>(r: &T) -> *const T {
r
}
@@ -702,7 +702,7 @@ pub fn from_ref<T: ?Sized>(r: &T) -> *const T {
#[inline(always)]
#[must_use]
#[unstable(feature = "ptr_from_ref", issue = "106116")]
-pub fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
+pub const fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
r
}
@@ -1135,27 +1135,58 @@ pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
#[rustc_const_unstable(feature = "const_ptr_read", issue = "80377")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn read<T>(src: *const T) -> T {
- // We are calling the intrinsics directly to avoid function calls in the generated code
- // as `intrinsics::copy_nonoverlapping` is a wrapper function.
- extern "rust-intrinsic" {
- #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
- fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
- }
+ // It would be semantically correct to implement this via `copy_nonoverlapping`
+ // and `MaybeUninit`, as was done before PR #109035. Calling `assume_init`
+ // provides enough information to know that this is a typed operation.
- let mut tmp = MaybeUninit::<T>::uninit();
- // SAFETY: the caller must guarantee that `src` is valid for reads.
- // `src` cannot overlap `tmp` because `tmp` was just allocated on
- // the stack as a separate allocated object.
+ // However, as of March 2023 the compiler was not capable of taking advantage
+ // of that information. Thus the implementation here switched to an intrinsic,
+ // which lowers to `_0 = *src` in MIR, to address a few issues:
//
- // Also, since we just wrote a valid value into `tmp`, it is guaranteed
- // to be properly initialized.
+ // - Using `MaybeUninit::assume_init` after a `copy_nonoverlapping` was not
+ // turning the untyped copy into a typed load. As such, the generated
+ // `load` in LLVM didn't get various metadata, such as `!range` (#73258),
+ // `!nonnull`, and `!noundef`, resulting in poorer optimization.
+ // - Going through the extra local resulted in multiple extra copies, even
+ // in optimized MIR. (Ignoring StorageLive/Dead, the intrinsic is one
+ // MIR statement, while the previous implementation was eight.) LLVM
+ // could sometimes optimize them away, but because `read` is at the core
+ // of so many things, not having them in the first place improves what we
+ // hand off to the backend. For example, `mem::replace::<Big>` previously
+ // emitted 4 `alloca` and 6 `memcpy`s, but is now 1 `alloca` and 3 `memcpy`s.
+ // - In general, this approach keeps us from getting any more bugs (like
+ // #106369) that boil down to "`read(p)` is worse than `*p`", as this
+ // makes them look identical to the backend (or other MIR consumers).
+ //
+ // Future enhancements to MIR optimizations might well allow this to return
+ // to the previous implementation, rather than using an intrinsic.
+
+ // SAFETY: the caller must guarantee that `src` is valid for reads.
unsafe {
assert_unsafe_precondition!(
"ptr::read requires that the pointer argument is aligned and non-null",
[T](src: *const T) => is_aligned_and_not_null(src)
);
- copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
- tmp.assume_init()
+
+ #[cfg(bootstrap)]
+ {
+ // We are calling the intrinsics directly to avoid function calls in the
+ // generated code as `intrinsics::copy_nonoverlapping` is a wrapper function.
+ extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+
+ // `src` cannot overlap `tmp` because `tmp` was just allocated on
+ // the stack as a separate allocated object.
+ let mut tmp = MaybeUninit::<T>::uninit();
+ copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
+ tmp.assume_init()
+ }
+ #[cfg(not(bootstrap))]
+ {
+ crate::intrinsics::read_via_copy(src)
+ }
}
}
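
Whatever `read` lowers to, its observable contract is a bitwise, typed copy
that leaves the source intact; a sketch:

    use std::ptr;

    fn main() {
        let x = 42_u64;
        // SAFETY: `&x` is valid for reads, aligned, and initialized.
        let y = unsafe { ptr::read(&x) };
        assert_eq!(x, y);
    }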
@@ -1340,6 +1371,7 @@ pub const unsafe fn write<T>(dst: *mut T, src: T) {
// as `intrinsics::copy_nonoverlapping` is a wrapper function.
extern "rust-intrinsic" {
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
+ #[rustc_nounwind]
fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
}
@@ -1860,150 +1892,205 @@ pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
hashee.hash(into);
}
-// If this is a unary fn pointer, it adds a doc comment.
-// Otherwise, it hides the docs entirely.
-macro_rules! maybe_fnptr_doc {
- (@ #[$meta:meta] $item:item) => {
- #[doc(hidden)]
- #[$meta]
- $item
- };
- ($a:ident @ #[$meta:meta] $item:item) => {
- #[doc(fake_variadic)]
- #[doc = "This trait is implemented for function pointers with up to twelve arguments."]
- #[$meta]
- $item
- };
- ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
- #[doc(hidden)]
- #[$meta]
- $item
- };
-}
+#[cfg(bootstrap)]
+mod old_fn_ptr_impl {
+ use super::*;
+ // If this is a unary fn pointer, it adds a doc comment.
+ // Otherwise, it hides the docs entirely.
+ macro_rules! maybe_fnptr_doc {
+ (@ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+ ($a:ident @ #[$meta:meta] $item:item) => {
+ #[doc(fake_variadic)]
+ #[doc = "This trait is implemented for function pointers with up to twelve arguments."]
+ #[$meta]
+ $item
+ };
+ ($a:ident $($rest_a:ident)+ @ #[$meta:meta] $item:item) => {
+ #[doc(hidden)]
+ #[$meta]
+ $item
+ };
+ }
-// FIXME(strict_provenance_magic): function pointers have buggy codegen that
-// necessitates casting to a usize to get the backend to do the right thing.
-// for now I will break AVR to silence *a billion* lints. We should probably
-// have a proper "opaque function pointer type" to handle this kind of thing.
+ // FIXME(strict_provenance_magic): function pointers have buggy codegen that
+ // necessitates casting to a usize to get the backend to do the right thing.
+ // for now I will break AVR to silence *a billion* lints. We should probably
+ // have a proper "opaque function pointer type" to handle this kind of thing.
-// Impls for function pointers
-macro_rules! fnptr_impls_safety_abi {
- ($FnTy: ty, $($Arg: ident),*) => {
+ // Impls for function pointers
+ macro_rules! fnptr_impls_safety_abi {
+ ($FnTy: ty, $($Arg: ident),*) => {
fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
};
(@c_unwind $FnTy: ty, $($Arg: ident),*) => {
fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
};
(#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> PartialEq for $FnTy {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- *self as usize == *other as usize
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> PartialEq for $FnTy {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ *self as usize == *other as usize
+ }
}
}
- }
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> Eq for $FnTy {}
- }
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> Eq for $FnTy {}
+ }
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> PartialOrd for $FnTy {
- #[inline]
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- (*self as usize).partial_cmp(&(*other as usize))
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> PartialOrd for $FnTy {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ (*self as usize).partial_cmp(&(*other as usize))
+ }
}
}
- }
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> Ord for $FnTy {
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- (*self as usize).cmp(&(*other as usize))
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> Ord for $FnTy {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ (*self as usize).cmp(&(*other as usize))
+ }
}
}
- }
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> hash::Hash for $FnTy {
- fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
- state.write_usize(*self as usize)
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> hash::Hash for $FnTy {
+ fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
+ state.write_usize(*self as usize)
+ }
}
}
- }
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(*self as usize, f)
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(*self as usize, f)
+ }
}
}
- }
- maybe_fnptr_doc! {
- $($Arg)* @
- #[$meta]
- impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::pointer_fmt_inner(*self as usize, f)
+ maybe_fnptr_doc! {
+ $($Arg)* @
+ #[$meta]
+ impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(*self as usize, f)
+ }
}
}
}
}
-}
-macro_rules! fnptr_impls_args {
- ($($Arg: ident),+) => {
- fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ macro_rules! fnptr_impls_args {
+ ($($Arg: ident),+) => {
+ fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
- fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
+ fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+) -> Ret, $($Arg),+ }
fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
- };
- () => {
- // No variadic functions with 0 parameters
- fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
- fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
+ };
+ () => {
+ // No variadic functions with 0 parameters
+ fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
+ fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
fnptr_impls_safety_abi! { @c_unwind extern "C-unwind" fn() -> Ret, }
- fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
- fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
+ fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
+ fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
fnptr_impls_safety_abi! { @c_unwind unsafe extern "C-unwind" fn() -> Ret, }
- };
+ };
+ }
+
+ fnptr_impls_args! {}
+ fnptr_impls_args! { T }
+ fnptr_impls_args! { A, B }
+ fnptr_impls_args! { A, B, C }
+ fnptr_impls_args! { A, B, C, D }
+ fnptr_impls_args! { A, B, C, D, E }
+ fnptr_impls_args! { A, B, C, D, E, F }
+ fnptr_impls_args! { A, B, C, D, E, F, G }
+ fnptr_impls_args! { A, B, C, D, E, F, G, H }
+ fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
+ fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
+ fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
+ fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
}
-fnptr_impls_args! {}
-fnptr_impls_args! { T }
-fnptr_impls_args! { A, B }
-fnptr_impls_args! { A, B, C }
-fnptr_impls_args! { A, B, C, D }
-fnptr_impls_args! { A, B, C, D, E }
-fnptr_impls_args! { A, B, C, D, E, F }
-fnptr_impls_args! { A, B, C, D, E, F, G }
-fnptr_impls_args! { A, B, C, D, E, F, G, H }
-fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
-fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
-fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
-fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
+#[cfg(not(bootstrap))]
+mod new_fn_ptr_impl {
+ use super::*;
+ use crate::marker::FnPtr;
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> PartialEq for F {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.addr() == other.addr()
+ }
+ }
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> Eq for F {}
+
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> PartialOrd for F {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.addr().partial_cmp(&other.addr())
+ }
+ }
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> Ord for F {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.addr().cmp(&other.addr())
+ }
+ }
+
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> hash::Hash for F {
+ fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
+ state.write_usize(self.addr() as _)
+ }
+ }
+
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> fmt::Pointer for F {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(self.addr() as _, f)
+ }
+ }
+
+ #[stable(feature = "fnptr_impls", since = "1.4.0")]
+ impl<F: FnPtr> fmt::Debug for F {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::pointer_fmt_inner(self.addr() as _, f)
+ }
+ }
+}
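
The behavior the `FnPtr`-based impls preserve: function pointers still
compare, hash, and format by address. Note that fn-pointer identity is not
strictly guaranteed across codegen units, so the equalities below hold in
practice rather than by contract:

    fn f(x: usize) -> bool { x > 0 }
    fn g(x: usize) -> bool { x == 0 }

    fn main() {
        let a: fn(usize) -> bool = f;
        let b: fn(usize) -> bool = f;
        let c: fn(usize) -> bool = g;
        assert_eq!(a, b);
        assert_ne!(a, c);
        println!("{a:p}"); // fmt::Pointer still works
    }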
/// Create a `const` raw pointer to a place, without creating an intermediate reference.
///
/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 422d0f2b8..ece5244e9 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -60,14 +60,14 @@ impl<T: ?Sized> *mut T {
/// Use the pointer value in a new pointer of another type.
///
- /// In case `val` is a (fat) pointer to an unsized type, this operation
+ /// In case `meta` is a (fat) pointer to an unsized type, this operation
/// will ignore the pointer part, whereas for (thin) pointers to sized
/// types, this has the same effect as a simple cast.
///
/// The resulting pointer will have provenance of `self`, i.e., for a fat
/// pointer, this operation is semantically the same as creating a new
/// fat pointer with the data pointer value of `self` but the metadata of
- /// `val`.
+ /// `meta`.
///
/// # Examples
///
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index 8c1a64886..13f56c0ce 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -462,8 +462,6 @@ impl<T> NonNull<[T]> {
/// # Examples
///
/// ```rust
- /// #![feature(nonnull_slice_from_raw_parts)]
- ///
/// use std::ptr::NonNull;
///
/// // create a slice pointer when starting out with a pointer to the first element
@@ -475,8 +473,8 @@ impl<T> NonNull<[T]> {
///
/// (Note that this example artificially demonstrates a use of this method,
/// but `let slice = NonNull::from(&x[..]);` would be a better way to write code like this.)
- #[unstable(feature = "nonnull_slice_from_raw_parts", issue = "71941")]
- #[rustc_const_unstable(feature = "const_nonnull_slice_from_raw_parts", issue = "71941")]
+ #[stable(feature = "nonnull_slice_from_raw_parts", since = "1.70.0")]
+ #[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")]
#[must_use]
#[inline]
pub const fn slice_from_raw_parts(data: NonNull<T>, len: usize) -> Self {
@@ -494,7 +492,6 @@ impl<T> NonNull<[T]> {
/// # Examples
///
/// ```rust
- /// #![feature(nonnull_slice_from_raw_parts)]
/// use std::ptr::NonNull;
///
/// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
@@ -514,7 +511,7 @@ impl<T> NonNull<[T]> {
/// # Examples
///
/// ```rust
- /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// #![feature(slice_ptr_get)]
/// use std::ptr::NonNull;
///
/// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
@@ -534,7 +531,7 @@ impl<T> NonNull<[T]> {
/// # Examples
///
/// ```rust
- /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// #![feature(slice_ptr_get)]
/// use std::ptr::NonNull;
///
/// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
@@ -668,7 +665,7 @@ impl<T> NonNull<[T]> {
/// # Examples
///
/// ```
- /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// #![feature(slice_ptr_get)]
/// use std::ptr::NonNull;
///
/// let x = &mut [1, 2, 4];
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 208b220c2..c48230fb8 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -545,8 +545,6 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(is_some_and)]
- ///
/// let x: Result<u32, &str> = Ok(2);
/// assert_eq!(x.is_ok_and(|x| x > 1), true);
///
@@ -558,7 +556,7 @@ impl<T, E> Result<T, E> {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "is_some_and", issue = "93050")]
+ #[stable(feature = "is_some_and", since = "1.70.0")]
pub fn is_ok_and(self, f: impl FnOnce(T) -> bool) -> bool {
match self {
Err(_) => false,
@@ -590,7 +588,6 @@ impl<T, E> Result<T, E> {
/// # Examples
///
/// ```
- /// #![feature(is_some_and)]
/// use std::io::{Error, ErrorKind};
///
/// let x: Result<u32, Error> = Err(Error::new(ErrorKind::NotFound, "!"));
@@ -604,7 +601,7 @@ impl<T, E> Result<T, E> {
/// ```
#[must_use]
#[inline]
- #[unstable(feature = "is_some_and", issue = "93050")]
+ #[stable(feature = "is_some_and", since = "1.70.0")]
pub fn is_err_and(self, f: impl FnOnce(E) -> bool) -> bool {
match self {
Ok(_) => false,
@@ -908,6 +905,7 @@ impl<T, E> Result<T, E> {
/// let y: Result<&str, &u32> = Err(&42);
/// assert_eq!(x.as_deref(), y);
/// ```
+ #[inline]
#[stable(feature = "inner_deref", since = "1.47.0")]
pub fn as_deref(&self) -> Result<&T::Target, &E>
where
@@ -934,6 +932,7 @@ impl<T, E> Result<T, E> {
/// let y: Result<&mut str, &mut u32> = Err(&mut i);
/// assert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);
/// ```
+ #[inline]
#[stable(feature = "inner_deref", since = "1.47.0")]
pub fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E>
where
@@ -1529,68 +1528,6 @@ impl<T, E> Result<T, E> {
Err(e) => e,
}
}
-
- /////////////////////////////////////////////////////////////////////////
- // Misc or niche
- /////////////////////////////////////////////////////////////////////////
-
- /// Returns `true` if the result is an [`Ok`] value containing the given value.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(option_result_contains)]
- ///
- /// let x: Result<u32, &str> = Ok(2);
- /// assert_eq!(x.contains(&2), true);
- ///
- /// let x: Result<u32, &str> = Ok(3);
- /// assert_eq!(x.contains(&2), false);
- ///
- /// let x: Result<u32, &str> = Err("Some error message");
- /// assert_eq!(x.contains(&2), false);
- /// ```
- #[must_use]
- #[inline]
- #[unstable(feature = "option_result_contains", issue = "62358")]
- pub fn contains<U>(&self, x: &U) -> bool
- where
- U: PartialEq<T>,
- {
- match self {
- Ok(y) => x == y,
- Err(_) => false,
- }
- }
-
- /// Returns `true` if the result is an [`Err`] value containing the given value.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(result_contains_err)]
- ///
- /// let x: Result<u32, &str> = Ok(2);
- /// assert_eq!(x.contains_err(&"Some error message"), false);
- ///
- /// let x: Result<u32, &str> = Err("Some error message");
- /// assert_eq!(x.contains_err(&"Some error message"), true);
- ///
- /// let x: Result<u32, &str> = Err("Some other error message");
- /// assert_eq!(x.contains_err(&"Some error message"), false);
- /// ```
- #[must_use]
- #[inline]
- #[unstable(feature = "result_contains_err", issue = "62358")]
- pub fn contains_err<F>(&self, f: &F) -> bool
- where
- F: PartialEq<E>,
- {
- match self {
- Ok(_) => false,
- Err(e) => f == e,
- }
- }
}
impl<T, E> Result<&T, E> {
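
A sketch of the migration for the removed `contains`/`contains_err`, next to
the newly stabilized predicates:

    fn main() {
        let ok: Result<u32, &str> = Ok(2);
        let err: Result<u32, &str> = Err("boom");
        assert!(ok == Ok(2)); // was ok.contains(&2)
        assert!(err == Err("boom")); // was err.contains_err(&"boom")
        assert!(ok.is_ok_and(|x| x > 1)); // stable since 1.70.0
        assert!(err.is_err_and(|e| e == "boom")); // stable since 1.70.0
    }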
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index c295a0e06..353935324 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -2,6 +2,7 @@
use crate::intrinsics::assert_unsafe_precondition;
use crate::intrinsics::const_eval_select;
+use crate::intrinsics::unchecked_sub;
use crate::ops;
use crate::ptr;
@@ -371,25 +372,25 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
- let this = ops::Range { start: self.start, end: self.end };
+ let this = ops::Range { ..self };
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
- // so the call to `add` is safe.
-
+ // so the call to `add` is safe and the length calculation cannot overflow.
unsafe {
assert_unsafe_precondition!(
"slice::get_unchecked requires that the range is within the slice",
[T](this: ops::Range<usize>, slice: *const [T]) =>
this.end >= this.start && this.end <= slice.len()
);
- ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
+ let new_len = unchecked_sub(self.end, self.start);
+ ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), new_len)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
- let this = ops::Range { start: self.start, end: self.end };
+ let this = ops::Range { ..self };
// SAFETY: see comments for `get_unchecked` above.
unsafe {
assert_unsafe_precondition!(
@@ -397,7 +398,8 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
[T](this: ops::Range<usize>, slice: *mut [T]) =>
this.end >= this.start && this.end <= slice.len()
);
- ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
+ let new_len = unchecked_sub(self.end, self.start);
+ ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), new_len)
}
}
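
Replacing `self.end - self.start` with `unchecked_sub` tells the optimizer the subtraction cannot wrap, which the caller's contract (`end >= start`, checked above in builds with debug assertions) already guarantees. A minimal sketch of the pattern in isolation, using the since-stabilized `usize::unchecked_sub` in a hypothetical free function, not the actual `SliceIndex` code:

    /// Length of `start..end`, assuming the caller upholds `end >= start`.
    unsafe fn range_len(start: usize, end: usize) -> usize {
        debug_assert!(end >= start);
        // SAFETY: the caller guarantees `end >= start`, so this cannot wrap.
        unsafe { end.unchecked_sub(start) }
    }

    fn main() {
        assert_eq!(unsafe { range_len(2, 5) }, 3);
    }
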
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index c4317799b..88b84bd13 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -132,9 +132,7 @@ iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
{
- self.as_slice().windows(2).all(|w| {
- compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false)
- })
+ self.as_slice().is_sorted_by(|a, b| compare(&a, &b))
}
}}
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index 89b92a7d5..392752f2a 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -176,11 +176,11 @@ macro_rules! iterator {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.post_inc_start(advance) };
- if advance == n { Ok(()) } else { Err(advance) }
+ NonZeroUsize::new(n - advance).map_or(Ok(()), Err)
}
#[inline]
@@ -371,11 +371,11 @@ macro_rules! iterator {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.pre_dec_end(advance) };
- if advance == n { Ok(()) } else { Err(advance) }
+ NonZeroUsize::new(n - advance).map_or(Ok(()), Err)
}
}
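
The new return type reports the number of steps that could *not* be taken as a `NonZeroUsize`, rather than the number that were. A minimal sketch of the contract (the `advance_by` method itself was still gated on `iter_advance_by` at the time of this change):

    #![feature(iter_advance_by)]
    use std::num::NonZeroUsize;

    fn main() {
        let mut it = [1, 2, 3].iter();
        assert_eq!(it.advance_by(2), Ok(()));
        // Only one element is left, so 4 of the 5 requested steps are
        // missing; the shortfall comes back as a NonZeroUsize.
        assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(4).unwrap()));
        assert_eq!(it.next(), None);
    }
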
@@ -393,6 +393,20 @@ macro_rules! iterator {
}
}
}
+
+ #[stable(feature = "default_iters", since = "1.70.0")]
+ impl<T> Default for $name<'_, T> {
+ /// Creates an empty slice iterator.
+ ///
+ /// ```
+ #[doc = concat!("# use core::slice::", stringify!($name), ";")]
+ #[doc = concat!("let iter: ", stringify!($name<'_, u8>), " = Default::default();")]
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ (& $( $mut_ )? []).into_iter()
+ }
+ }
}
}
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 1cd86b445..f541808a6 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -1695,7 +1695,13 @@ impl<T> [T] {
let ptr = self.as_ptr();
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
- unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid)) }
+ unsafe {
+ assert_unsafe_precondition!(
+ "slice::split_at_unchecked requires the index to be within the slice",
+ (mid: usize, len: usize) => mid <= len
+ );
+ (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid))
+ }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
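
`assert_unsafe_precondition!` only fires when core itself is compiled with debug assertions (e.g. std's own test builds or `-Zbuild-std`), so release users pay nothing. A sketch of what the check now guards against (`split_at_unchecked` was still behind the `slice_split_at_unchecked` feature at the time):

    #![feature(slice_split_at_unchecked)]

    fn main() {
        let v = [1, 2, 3];
        // In bounds (1 <= v.len()), so this is well defined.
        let (a, b) = unsafe { v.split_at_unchecked(1) };
        assert_eq!((a, b), (&[1][..], &[2, 3][..]));
        // `v.split_at_unchecked(5)` would be UB; with a debug-assertions
        // build of core the new check aborts instead of reading out of bounds.
    }
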
@@ -2381,7 +2387,8 @@ impl<T> [T] {
}
/// Binary searches this slice for a given element.
- /// This behaves similarly to [`contains`] if this slice is sorted.
+ /// If the slice is not sorted, the returned result is unspecified and
+ /// meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2393,7 +2400,6 @@ impl<T> [T] {
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
- /// [`contains`]: slice::contains
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
@@ -2456,12 +2462,13 @@ impl<T> [T] {
}
/// Binary searches this slice with a comparator function.
- /// This behaves similarly to [`contains`] if this slice is sorted.
///
- /// The comparator function should implement an order consistent
- /// with the sort order of the underlying slice, returning an
- /// order code that indicates whether its argument is `Less`,
- /// `Equal` or `Greater` the desired target.
+ /// The comparator function should return an order code that indicates
+ /// whether its argument is `Less`, `Equal` or `Greater` than the
+ /// desired target.
+ /// If the slice is not sorted or if the comparator function does not
+ /// implement an order consistent with the sort order of the underlying
+ /// slice, the returned result is unspecified and meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2473,7 +2480,6 @@ impl<T> [T] {
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
- /// [`contains`]: slice::contains
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
@@ -2542,10 +2548,11 @@ impl<T> [T] {
}
/// Binary searches this slice with a key extraction function.
- /// This behaves similarly to [`contains`] if this slice is sorted.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
+ /// If the slice is not sorted by the key, the returned result is
+ /// unspecified and meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2557,7 +2564,6 @@ impl<T> [T] {
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
- /// [`contains`]: slice::contains
/// [`sort_by_key`]: slice::sort_by_key
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
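
The rewritten docs make explicit that sortedness is a logical precondition, not a safety one: on an unsorted slice the search is still memory safe, it just returns an arbitrary answer. For instance:

    fn main() {
        let sorted = [1, 3, 5, 7];
        assert_eq!(sorted.binary_search(&5), Ok(2));
        assert_eq!(sorted.binary_search(&4), Err(2)); // insertion point

        let unsorted = [7, 1, 5, 3];
        // Compiles and runs fine, but the result is unspecified and
        // meaningless, so it is deliberately not asserted here.
        let _ = unsorted.binary_search(&5);
    }
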
@@ -3816,7 +3822,7 @@ impl<T> [T] {
where
F: FnMut(&'a T, &'a T) -> Option<Ordering>,
{
- self.iter().is_sorted_by(|a, b| compare(*a, *b))
+ self.array_windows().all(|[a, b]| compare(a, b).map_or(false, Ordering::is_le))
}
/// Checks if the elements of this slice are sorted using the given key extraction function.
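
`array_windows` is an unstable internal convenience here; the new body is behaviorally the same as this stable sketch over `windows(2)` (a hypothetical standalone function, not the real method):

    use std::cmp::Ordering;

    fn is_sorted_by_sketch<T>(
        v: &[T],
        mut compare: impl FnMut(&T, &T) -> Option<Ordering>,
    ) -> bool {
        // Every adjacent pair must compare as Less or Equal.
        v.windows(2).all(|w| compare(&w[0], &w[1]).map_or(false, Ordering::is_le))
    }

    fn main() {
        assert!(is_sorted_by_sketch(&[1, 2, 2, 9], |a, b| a.partial_cmp(b)));
        assert!(!is_sorted_by_sketch(&[3, 1], |a, b| a.partial_cmp(b)));
    }
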
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 2333f60a8..07fd96f92 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -1486,7 +1486,7 @@ where
}
/// Finds a streak of presorted elements starting at the beginning of the slice. Returns the first
-/// value that is not part of said streak, and a bool denoting wether the streak was reversed.
+/// value that is not part of said streak, and a bool denoting whether the streak was reversed.
/// Streaks can be increasing or decreasing.
fn find_streak<T, F>(v: &[T], is_less: &mut F) -> (usize, bool)
where
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index 95c682f42..772c36055 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -13,7 +13,7 @@ use super::from_utf8_unchecked;
use super::pattern::Pattern;
use super::pattern::{DoubleEndedSearcher, ReverseSearcher, Searcher};
use super::validations::{next_code_point, next_code_point_reverse};
-use super::LinesAnyMap;
+use super::LinesMap;
use super::{BytesIsNotEmpty, UnsafeBytesToStr};
use super::{CharEscapeDebugContinue, CharEscapeDefault, CharEscapeUnicode};
use super::{IsAsciiWhitespace, IsNotEmpty, IsWhitespace};
@@ -1104,7 +1104,7 @@ generate_pattern_iterators! {
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
-pub struct Lines<'a>(pub(super) Map<SplitTerminator<'a, char>, LinesAnyMap>);
+pub struct Lines<'a>(pub(super) Map<SplitInclusive<'a, char>, LinesMap>);
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Lines<'a> {
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index ab2f8520e..041694299 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -1011,7 +1011,7 @@ impl str {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn lines(&self) -> Lines<'_> {
- Lines(self.split_terminator('\n').map(LinesAnyMap))
+ Lines(self.split_inclusive('\n').map(LinesMap))
}
/// An iterator over the lines of a string.
@@ -2604,10 +2604,10 @@ impl Default for &mut str {
impl_fn_for_zst! {
/// A nameable, cloneable fn type
#[derive(Clone)]
- struct LinesAnyMap impl<'a> Fn = |line: &'a str| -> &'a str {
- let l = line.len();
- if l > 0 && line.as_bytes()[l - 1] == b'\r' { &line[0 .. l - 1] }
- else { line }
+ struct LinesMap impl<'a> Fn = |line: &'a str| -> &'a str {
+ let Some(line) = line.strip_suffix('\n') else { return line };
+ let Some(line) = line.strip_suffix('\r') else { return line };
+ line
};
#[derive(Clone)]
@@ -2655,5 +2655,6 @@ impl_fn_for_zst! {
};
}
+// This is required to make `impl From<&str> for Box<dyn Error>` and `impl<E> From<E> for Box<dyn Error>` not overlap.
#[stable(feature = "rust1", since = "1.0.0")]
impl !crate::error::Error for &str {}
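
The net behavioral change from `LinesAnyMap` to `LinesMap`: a `\r` is now stripped only when it is part of a `\r\n` terminator, because the new closure strips `\r` only after successfully stripping `\n`. Concretely, under the new behavior:

    fn main() {
        let text = "foo\r\nbar\n\nbaz\r";
        let lines: Vec<&str> = text.lines().collect();
        // "\r\n" and "\n" terminators are stripped; the final "baz\r" has
        // no trailing '\n', so its bare '\r' is now preserved.
        assert_eq!(lines, ["foo", "bar", "", "baz\r"]);
    }
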
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index 19da6d2fb..e3a464a1c 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -1891,7 +1891,7 @@ unsafe fn small_slice_eq(x: &[u8], y: &[u8]) -> bool {
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
- // Thus, derefencing both `px` and `py` in the loop below is safe.
+ // Thus, dereferencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
// end of `px` and `py`. Thus, the final dereference outside of the
diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs
index 68f62ce8b..41c097b55 100644
--- a/library/core/src/str/traits.rs
+++ b/library/core/src/str/traits.rs
@@ -1,6 +1,7 @@
//! Trait implementations for `str`.
use crate::cmp::Ordering;
+use crate::intrinsics::assert_unsafe_precondition;
use crate::ops;
use crate::ptr;
use crate::slice::SliceIndex;
@@ -194,7 +195,21 @@ unsafe impl const SliceIndex<str> for ops::Range<usize> {
let slice = slice as *const [u8];
// SAFETY: the caller guarantees that `self` is in bounds of `slice`
// which satisfies all the conditions for `add`.
- let ptr = unsafe { slice.as_ptr().add(self.start) };
+ let ptr = unsafe {
+ let this = ops::Range { ..self };
+ assert_unsafe_precondition!(
+ "str::get_unchecked requires that the range is within the string slice",
+ (this: ops::Range<usize>, slice: *const [u8]) =>
+ // We'd like to check that the bounds are on char boundaries,
+ // but there's not really a way to do so without reading
+ // behind the pointer, which has aliasing implications.
+ // It's also not possible to move this check up to
+ // `str::get_unchecked` without adding a special function
+ // to `SliceIndex` just for this.
+ this.end >= this.start && this.end <= slice.len()
+ );
+ slice.as_ptr().add(self.start)
+ };
let len = self.end - self.start;
ptr::slice_from_raw_parts(ptr, len) as *const str
}
@@ -202,7 +217,15 @@ unsafe impl const SliceIndex<str> for ops::Range<usize> {
unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let slice = slice as *mut [u8];
// SAFETY: see comments for `get_unchecked`.
- let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
+ let ptr = unsafe {
+ let this = ops::Range { ..self };
+ assert_unsafe_precondition!(
+ "str::get_unchecked_mut requires that the range is within the string slice",
+ (this: ops::Range<usize>, slice: *mut [u8]) =>
+ this.end >= this.start && this.end <= slice.len()
+ );
+ slice.as_mut_ptr().add(self.start)
+ };
let len = self.end - self.start;
ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
}
@@ -272,15 +295,13 @@ unsafe impl const SliceIndex<str> for ops::RangeTo<usize> {
}
#[inline]
unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
- let slice = slice as *const [u8];
- let ptr = slice.as_ptr();
- ptr::slice_from_raw_parts(ptr, self.end) as *const str
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { (0..self.end).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
- let slice = slice as *mut [u8];
- let ptr = slice.as_mut_ptr();
- ptr::slice_from_raw_parts_mut(ptr, self.end) as *mut str
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { (0..self.end).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &str) -> &Self::Output {
@@ -343,20 +364,15 @@ unsafe impl const SliceIndex<str> for ops::RangeFrom<usize> {
}
#[inline]
unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
- let slice = slice as *const [u8];
- // SAFETY: the caller guarantees that `self` is in bounds of `slice`
- // which satisfies all the conditions for `add`.
- let ptr = unsafe { slice.as_ptr().add(self.start) };
- let len = slice.len() - self.start;
- ptr::slice_from_raw_parts(ptr, len) as *const str
+ let len = (slice as *const [u8]).len();
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+ unsafe { (self.start..len).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
- let slice = slice as *mut [u8];
- // SAFETY: identical to `get_unchecked`.
- let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
- let len = slice.len() - self.start;
- ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
+ let len = (slice as *mut [u8]).len();
+ // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+ unsafe { (self.start..len).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &str) -> &Self::Output {
@@ -452,35 +468,29 @@ unsafe impl const SliceIndex<str> for ops::RangeToInclusive<usize> {
type Output = str;
#[inline]
fn get(self, slice: &str) -> Option<&Self::Output> {
- if self.end == usize::MAX { None } else { (..self.end + 1).get(slice) }
+ (0..=self.end).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
- if self.end == usize::MAX { None } else { (..self.end + 1).get_mut(slice) }
+ (0..=self.end).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
// SAFETY: the caller must uphold the safety contract for `get_unchecked`.
- unsafe { (..self.end + 1).get_unchecked(slice) }
+ unsafe { (0..=self.end).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
// SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
- unsafe { (..self.end + 1).get_unchecked_mut(slice) }
+ unsafe { (0..=self.end).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &str) -> &Self::Output {
- if self.end == usize::MAX {
- str_index_overflow_fail();
- }
- (..self.end + 1).index(slice)
+ (0..=self.end).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut str) -> &mut Self::Output {
- if self.end == usize::MAX {
- str_index_overflow_fail();
- }
- (..self.end + 1).index_mut(slice)
+ (0..=self.end).index_mut(slice)
}
}
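
These `RangeTo`, `RangeFrom`, and `RangeToInclusive` implementations now forward to the `Range`/`RangeInclusive` ones rather than duplicating the pointer arithmetic and overflow handling; observable indexing behavior is unchanged. The equivalences being relied on:

    fn main() {
        let s = "hello";
        assert_eq!(&s[..3], &s[0..3]);        // RangeTo  == Range starting at 0
        assert_eq!(&s[2..], &s[2..s.len()]);  // RangeFrom == Range ending at len
        assert_eq!(&s[..=2], &s[0..=2]);      // RangeToInclusive == RangeInclusive
    }
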
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 040a59184..f1ed68d72 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -960,6 +960,7 @@ impl AtomicBool {
/// ```ignore (extern-declaration)
/// # fn main() {
/// use std::sync::atomic::AtomicBool;
+ ///
/// extern "C" {
/// fn my_atomic_op(arg: *mut bool);
/// }
@@ -971,7 +972,8 @@ impl AtomicBool {
/// # }
/// ```
#[inline]
- #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
+ #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
+ #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
pub const fn as_ptr(&self) -> *mut bool {
self.v.get().cast()
}
@@ -1890,7 +1892,6 @@ impl<T> AtomicPtr<T> {
/// # Examples
///
/// ```ignore (extern-declaration)
- /// #![feature(atomic_mut_ptr)]
/// use std::sync::atomic::AtomicPtr;
///
/// extern "C" {
@@ -1906,7 +1907,8 @@ impl<T> AtomicPtr<T> {
/// }
/// ```
#[inline]
- #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
+ #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
+ #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
pub const fn as_ptr(&self) -> *mut *mut T {
self.p.get()
}
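
With `as_ptr` stabilized (and const-stabilized), the doc examples above no longer need a feature gate. A self-contained sketch of a sound non-FFI use, where exclusive access makes the raw write legal:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        let a = AtomicUsize::new(1);
        let p: *mut usize = a.as_ptr();
        // SAFETY: no other thread accesses `a` while we write through
        // the raw pointer, so this non-atomic write is not a data race.
        unsafe { p.write(42) };
        assert_eq!(a.load(Ordering::Relaxed), 42);
    }
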
@@ -1949,8 +1951,7 @@ macro_rules! if_not_8_bit {
($_:ident, $($tt:tt)*) => { $($tt)* };
}
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic_load_store))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic_load_store = "8"))]
+#[cfg(target_has_atomic_load_store)]
macro_rules! atomic_int {
($cfg_cas:meta,
$cfg_align:meta,
@@ -2859,9 +2860,8 @@ macro_rules! atomic_int {
/// # }
/// ```
#[inline]
- #[unstable(feature = "atomic_mut_ptr",
- reason = "recently added",
- issue = "66893")]
+ #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
+ #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
@@ -3124,8 +3124,7 @@ atomic_int_ptr_sized! {
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
match order {
Release => Relaxed,
@@ -3167,8 +3166,7 @@ unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_swap`.
@@ -3185,8 +3183,7 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_add`.
@@ -3203,8 +3200,7 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_sub`.
@@ -3220,8 +3216,7 @@ unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange<T: Copy>(
dst: *mut T,
@@ -3256,8 +3251,7 @@ unsafe fn atomic_compare_exchange<T: Copy>(
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange_weak<T: Copy>(
dst: *mut T,
@@ -3292,8 +3286,7 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_and`
@@ -3309,8 +3302,7 @@ unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_nand`
@@ -3326,8 +3318,7 @@ unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_or`
@@ -3343,8 +3334,7 @@ unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
}
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_xor`
@@ -3361,8 +3351,7 @@ unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
/// returns the max value (signed comparison)
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_max`
@@ -3379,8 +3368,7 @@ unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
/// returns the min value (signed comparison)
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_min`
@@ -3397,8 +3385,7 @@ unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
/// returns the max value (unsigned comparison)
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umax`
@@ -3415,8 +3402,7 @@ unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
/// returns the min value (unsigned comparison)
#[inline]
-#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
-#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
+#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umin`
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index 301ad41c9..3f3e19c55 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -69,9 +69,6 @@ use core::task::{Context, Poll};
/// for any value. This is a parallel with the fact that
/// `&` and `&mut` references together can be thought of as a _compile-time_
/// version of a read-write lock.
-///
-///
-/// [`Sync`]: core::marker::Sync
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[doc(alias = "SyncWrapper")]
#[doc(alias = "SyncCell")]
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index 28275798f..0620e7173 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -1,7 +1,7 @@
// See src/libstd/primitive_docs.rs for documentation.
-use crate::cmp::Ordering::*;
-use crate::cmp::*;
+use crate::cmp::Ordering::{self, *};
+use crate::mem::transmute;
// Recursive macro for implementing n-ary tuple functions and operations
//
@@ -61,19 +61,19 @@ macro_rules! tuple_impls {
}
#[inline]
fn lt(&self, other: &($($T,)+)) -> bool {
- lexical_ord!(lt, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ lexical_ord!(lt, Less, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
}
#[inline]
fn le(&self, other: &($($T,)+)) -> bool {
- lexical_ord!(le, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ lexical_ord!(le, Less, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
}
#[inline]
fn ge(&self, other: &($($T,)+)) -> bool {
- lexical_ord!(ge, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ lexical_ord!(ge, Greater, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
}
#[inline]
fn gt(&self, other: &($($T,)+)) -> bool {
- lexical_ord!(gt, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
+ lexical_ord!(gt, Greater, $( ${ignore(T)} self.${index()}, other.${index()} ),+)
}
}
}
@@ -123,16 +123,38 @@ macro_rules! maybe_tuple_doc {
};
}
-// Constructs an expression that performs a lexical ordering using method $rel.
+#[inline]
+const fn ordering_is_some(c: Option<Ordering>, x: Ordering) -> bool {
+ // FIXME: Just use `==` once that's const-stable on `Option`s.
+ // This isn't using `match` because that optimizes worse due to
+ // making a two-step check (`Some` *then* the inner value).
+
+ // SAFETY: There's no public guarantee for `Option<Ordering>`,
+ // but we're core so we know that it's definitely a byte.
+ unsafe {
+ let c: i8 = transmute(c);
+ let x: i8 = transmute(Some(x));
+ c == x
+ }
+}
+
+// Constructs an expression that performs a lexical ordering using method `$rel`.
// The values are interleaved, so the macro invocation for
-// `(a1, a2, a3) < (b1, b2, b3)` would be `lexical_ord!(lt, a1, b1, a2, b2,
-// a3, b3)` (and similarly for `lexical_cmp`)
+// `(a1, a2, a3) < (b1, b2, b3)` would be `lexical_ord!(lt, opt_is_lt, a1, b1,
+// a2, b2, a3, b3)` (and similarly for `lexical_cmp`)
+//
+// `$ne_rel` is only used to determine the result after checking that they're
+// not equal, so `lt` and `le` can both just use `Less`.
macro_rules! lexical_ord {
- ($rel: ident, $a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
- if $a != $b { lexical_ord!($rel, $a, $b) }
- else { lexical_ord!($rel, $($rest_a, $rest_b),+) }
+ ($rel: ident, $ne_rel: ident, $a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {{
+ let c = PartialOrd::partial_cmp(&$a, &$b);
+ if !ordering_is_some(c, Equal) { ordering_is_some(c, $ne_rel) }
+ else { lexical_ord!($rel, $ne_rel, $($rest_a, $rest_b),+) }
+ }};
+ ($rel: ident, $ne_rel: ident, $a:expr, $b:expr) => {
+ // Use the specific method for the last element
+ PartialOrd::$rel(&$a, &$b)
};
- ($rel: ident, $a:expr, $b:expr) => { ($a) . $rel (& $b) };
}
macro_rules! lexical_partial_cmp {
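
For intuition, here is roughly what the new macro expands to for `lt` on a 2-tuple: computing one `partial_cmp` per element lets the same `Option<Ordering>` answer both "is it equal?" and "is it the deciding relation?". A hand-expanded sketch (hypothetical free function; `ordering_is_some` simplified to plain `==` rather than the transmute-based const version above):

    use std::cmp::Ordering::{self, *};

    // Stand-in for the transmute-based const helper.
    fn ordering_is_some(c: Option<Ordering>, x: Ordering) -> bool {
        c == Some(x)
    }

    fn tuple2_lt<A: PartialOrd, B: PartialOrd>(a: &(A, B), b: &(A, B)) -> bool {
        let c = PartialOrd::partial_cmp(&a.0, &b.0);
        if !ordering_is_some(c, Equal) {
            ordering_is_some(c, Less) // `$ne_rel` is `Less` for both `lt` and `le`
        } else {
            PartialOrd::lt(&a.1, &b.1) // last element uses the specific method
        }
    }

    fn main() {
        assert!(tuple2_lt(&(1, 9), &(2, 0)));
        assert!(!tuple2_lt(&(1, 9), &(1, 9)));
        assert!(tuple2_lt(&(1, 8), &(1, 9)));
    }
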
diff --git a/library/core/src/unicode/unicode_data.rs b/library/core/src/unicode/unicode_data.rs
index bd69ca520..b25e9df28 100644
--- a/library/core/src/unicode/unicode_data.rs
+++ b/library/core/src/unicode/unicode_data.rs
@@ -580,14 +580,22 @@ pub mod white_space {
#[rustfmt::skip]
pub mod conversions {
+ const INDEX_MASK: u32 = 0x400000;
+
pub fn to_lower(c: char) -> [char; 3] {
if c.is_ascii() {
[(c as u8).to_ascii_lowercase() as char, '\0', '\0']
} else {
- match bsearch_case_table(c, LOWERCASE_TABLE) {
- None => [c, '\0', '\0'],
- Some(index) => LOWERCASE_TABLE[index].1,
- }
+ LOWERCASE_TABLE
+ .binary_search_by(|&(key, _)| key.cmp(&c))
+ .map(|i| {
+ let u = LOWERCASE_TABLE[i].1;
+ char::from_u32(u).map(|c| [c, '\0', '\0']).unwrap_or_else(|| {
+ // SAFETY: Index comes from statically generated table
+ unsafe { *LOWERCASE_TABLE_MULTI.get_unchecked((u & (INDEX_MASK - 1)) as usize) }
+ })
+ })
+ .unwrap_or([c, '\0', '\0'])
}
}
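
The trick the new code relies on: `INDEX_MASK` (bit 22, value 0x40_0000) lies above `char::MAX` (0x10_FFFF), so a table entry with that bit set can never be a valid `char`, and `char::from_u32` returning `None` doubles as the "this is an index into the multi-char side table" signal. A sketch of the decode step with a made-up two-entry side table standing in for `LOWERCASE_TABLE_MULTI`:

    const INDEX_MASK: u32 = 0x400000;

    // Hypothetical stand-in for the generated *_TABLE_MULTI.
    static MULTI: &[[char; 3]] = &[['i', '\u{307}', '\0'], ['s', 's', '\0']];

    fn decode(u: u32) -> [char; 3] {
        // Any u32 with bit 22 set exceeds char::MAX, so from_u32 fails
        // exactly for the sentinel-index entries.
        char::from_u32(u)
            .map(|c| [c, '\0', '\0'])
            .unwrap_or_else(|| MULTI[(u & (INDEX_MASK - 1)) as usize])
    }

    fn main() {
        assert_eq!(decode('a' as u32), ['a', '\0', '\0']);
        assert_eq!(decode(INDEX_MASK | 1), ['s', 's', '\0']);
    }
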
@@ -595,1800 +603,781 @@ pub mod conversions {
if c.is_ascii() {
[(c as u8).to_ascii_uppercase() as char, '\0', '\0']
} else {
- match bsearch_case_table(c, UPPERCASE_TABLE) {
- None => [c, '\0', '\0'],
- Some(index) => UPPERCASE_TABLE[index].1,
- }
+ UPPERCASE_TABLE
+ .binary_search_by(|&(key, _)| key.cmp(&c))
+ .map(|i| {
+ let u = UPPERCASE_TABLE[i].1;
+ char::from_u32(u).map(|c| [c, '\0', '\0']).unwrap_or_else(|| {
+ // SAFETY: Index comes from statically generated table
+ unsafe { *UPPERCASE_TABLE_MULTI.get_unchecked((u & (INDEX_MASK - 1)) as usize) }
+ })
+ })
+ .unwrap_or([c, '\0', '\0'])
}
}
- fn bsearch_case_table(c: char, table: &[(char, [char; 3])]) -> Option<usize> {
- table.binary_search_by(|&(key, _)| key.cmp(&c)).ok()
- }
- static LOWERCASE_TABLE: &[(char, [char; 3])] = &[
- ('A', ['a', '\u{0}', '\u{0}']), ('B', ['b', '\u{0}', '\u{0}']),
- ('C', ['c', '\u{0}', '\u{0}']), ('D', ['d', '\u{0}', '\u{0}']),
- ('E', ['e', '\u{0}', '\u{0}']), ('F', ['f', '\u{0}', '\u{0}']),
- ('G', ['g', '\u{0}', '\u{0}']), ('H', ['h', '\u{0}', '\u{0}']),
- ('I', ['i', '\u{0}', '\u{0}']), ('J', ['j', '\u{0}', '\u{0}']),
- ('K', ['k', '\u{0}', '\u{0}']), ('L', ['l', '\u{0}', '\u{0}']),
- ('M', ['m', '\u{0}', '\u{0}']), ('N', ['n', '\u{0}', '\u{0}']),
- ('O', ['o', '\u{0}', '\u{0}']), ('P', ['p', '\u{0}', '\u{0}']),
- ('Q', ['q', '\u{0}', '\u{0}']), ('R', ['r', '\u{0}', '\u{0}']),
- ('S', ['s', '\u{0}', '\u{0}']), ('T', ['t', '\u{0}', '\u{0}']),
- ('U', ['u', '\u{0}', '\u{0}']), ('V', ['v', '\u{0}', '\u{0}']),
- ('W', ['w', '\u{0}', '\u{0}']), ('X', ['x', '\u{0}', '\u{0}']),
- ('Y', ['y', '\u{0}', '\u{0}']), ('Z', ['z', '\u{0}', '\u{0}']),
- ('\u{c0}', ['\u{e0}', '\u{0}', '\u{0}']), ('\u{c1}', ['\u{e1}', '\u{0}', '\u{0}']),
- ('\u{c2}', ['\u{e2}', '\u{0}', '\u{0}']), ('\u{c3}', ['\u{e3}', '\u{0}', '\u{0}']),
- ('\u{c4}', ['\u{e4}', '\u{0}', '\u{0}']), ('\u{c5}', ['\u{e5}', '\u{0}', '\u{0}']),
- ('\u{c6}', ['\u{e6}', '\u{0}', '\u{0}']), ('\u{c7}', ['\u{e7}', '\u{0}', '\u{0}']),
- ('\u{c8}', ['\u{e8}', '\u{0}', '\u{0}']), ('\u{c9}', ['\u{e9}', '\u{0}', '\u{0}']),
- ('\u{ca}', ['\u{ea}', '\u{0}', '\u{0}']), ('\u{cb}', ['\u{eb}', '\u{0}', '\u{0}']),
- ('\u{cc}', ['\u{ec}', '\u{0}', '\u{0}']), ('\u{cd}', ['\u{ed}', '\u{0}', '\u{0}']),
- ('\u{ce}', ['\u{ee}', '\u{0}', '\u{0}']), ('\u{cf}', ['\u{ef}', '\u{0}', '\u{0}']),
- ('\u{d0}', ['\u{f0}', '\u{0}', '\u{0}']), ('\u{d1}', ['\u{f1}', '\u{0}', '\u{0}']),
- ('\u{d2}', ['\u{f2}', '\u{0}', '\u{0}']), ('\u{d3}', ['\u{f3}', '\u{0}', '\u{0}']),
- ('\u{d4}', ['\u{f4}', '\u{0}', '\u{0}']), ('\u{d5}', ['\u{f5}', '\u{0}', '\u{0}']),
- ('\u{d6}', ['\u{f6}', '\u{0}', '\u{0}']), ('\u{d8}', ['\u{f8}', '\u{0}', '\u{0}']),
- ('\u{d9}', ['\u{f9}', '\u{0}', '\u{0}']), ('\u{da}', ['\u{fa}', '\u{0}', '\u{0}']),
- ('\u{db}', ['\u{fb}', '\u{0}', '\u{0}']), ('\u{dc}', ['\u{fc}', '\u{0}', '\u{0}']),
- ('\u{dd}', ['\u{fd}', '\u{0}', '\u{0}']), ('\u{de}', ['\u{fe}', '\u{0}', '\u{0}']),
- ('\u{100}', ['\u{101}', '\u{0}', '\u{0}']), ('\u{102}', ['\u{103}', '\u{0}', '\u{0}']),
- ('\u{104}', ['\u{105}', '\u{0}', '\u{0}']), ('\u{106}', ['\u{107}', '\u{0}', '\u{0}']),
- ('\u{108}', ['\u{109}', '\u{0}', '\u{0}']), ('\u{10a}', ['\u{10b}', '\u{0}', '\u{0}']),
- ('\u{10c}', ['\u{10d}', '\u{0}', '\u{0}']), ('\u{10e}', ['\u{10f}', '\u{0}', '\u{0}']),
- ('\u{110}', ['\u{111}', '\u{0}', '\u{0}']), ('\u{112}', ['\u{113}', '\u{0}', '\u{0}']),
- ('\u{114}', ['\u{115}', '\u{0}', '\u{0}']), ('\u{116}', ['\u{117}', '\u{0}', '\u{0}']),
- ('\u{118}', ['\u{119}', '\u{0}', '\u{0}']), ('\u{11a}', ['\u{11b}', '\u{0}', '\u{0}']),
- ('\u{11c}', ['\u{11d}', '\u{0}', '\u{0}']), ('\u{11e}', ['\u{11f}', '\u{0}', '\u{0}']),
- ('\u{120}', ['\u{121}', '\u{0}', '\u{0}']), ('\u{122}', ['\u{123}', '\u{0}', '\u{0}']),
- ('\u{124}', ['\u{125}', '\u{0}', '\u{0}']), ('\u{126}', ['\u{127}', '\u{0}', '\u{0}']),
- ('\u{128}', ['\u{129}', '\u{0}', '\u{0}']), ('\u{12a}', ['\u{12b}', '\u{0}', '\u{0}']),
- ('\u{12c}', ['\u{12d}', '\u{0}', '\u{0}']), ('\u{12e}', ['\u{12f}', '\u{0}', '\u{0}']),
- ('\u{130}', ['i', '\u{307}', '\u{0}']), ('\u{132}', ['\u{133}', '\u{0}', '\u{0}']),
- ('\u{134}', ['\u{135}', '\u{0}', '\u{0}']), ('\u{136}', ['\u{137}', '\u{0}', '\u{0}']),
- ('\u{139}', ['\u{13a}', '\u{0}', '\u{0}']), ('\u{13b}', ['\u{13c}', '\u{0}', '\u{0}']),
- ('\u{13d}', ['\u{13e}', '\u{0}', '\u{0}']), ('\u{13f}', ['\u{140}', '\u{0}', '\u{0}']),
- ('\u{141}', ['\u{142}', '\u{0}', '\u{0}']), ('\u{143}', ['\u{144}', '\u{0}', '\u{0}']),
- ('\u{145}', ['\u{146}', '\u{0}', '\u{0}']), ('\u{147}', ['\u{148}', '\u{0}', '\u{0}']),
- ('\u{14a}', ['\u{14b}', '\u{0}', '\u{0}']), ('\u{14c}', ['\u{14d}', '\u{0}', '\u{0}']),
- ('\u{14e}', ['\u{14f}', '\u{0}', '\u{0}']), ('\u{150}', ['\u{151}', '\u{0}', '\u{0}']),
- ('\u{152}', ['\u{153}', '\u{0}', '\u{0}']), ('\u{154}', ['\u{155}', '\u{0}', '\u{0}']),
- ('\u{156}', ['\u{157}', '\u{0}', '\u{0}']), ('\u{158}', ['\u{159}', '\u{0}', '\u{0}']),
- ('\u{15a}', ['\u{15b}', '\u{0}', '\u{0}']), ('\u{15c}', ['\u{15d}', '\u{0}', '\u{0}']),
- ('\u{15e}', ['\u{15f}', '\u{0}', '\u{0}']), ('\u{160}', ['\u{161}', '\u{0}', '\u{0}']),
- ('\u{162}', ['\u{163}', '\u{0}', '\u{0}']), ('\u{164}', ['\u{165}', '\u{0}', '\u{0}']),
- ('\u{166}', ['\u{167}', '\u{0}', '\u{0}']), ('\u{168}', ['\u{169}', '\u{0}', '\u{0}']),
- ('\u{16a}', ['\u{16b}', '\u{0}', '\u{0}']), ('\u{16c}', ['\u{16d}', '\u{0}', '\u{0}']),
- ('\u{16e}', ['\u{16f}', '\u{0}', '\u{0}']), ('\u{170}', ['\u{171}', '\u{0}', '\u{0}']),
- ('\u{172}', ['\u{173}', '\u{0}', '\u{0}']), ('\u{174}', ['\u{175}', '\u{0}', '\u{0}']),
- ('\u{176}', ['\u{177}', '\u{0}', '\u{0}']), ('\u{178}', ['\u{ff}', '\u{0}', '\u{0}']),
- ('\u{179}', ['\u{17a}', '\u{0}', '\u{0}']), ('\u{17b}', ['\u{17c}', '\u{0}', '\u{0}']),
- ('\u{17d}', ['\u{17e}', '\u{0}', '\u{0}']), ('\u{181}', ['\u{253}', '\u{0}', '\u{0}']),
- ('\u{182}', ['\u{183}', '\u{0}', '\u{0}']), ('\u{184}', ['\u{185}', '\u{0}', '\u{0}']),
- ('\u{186}', ['\u{254}', '\u{0}', '\u{0}']), ('\u{187}', ['\u{188}', '\u{0}', '\u{0}']),
- ('\u{189}', ['\u{256}', '\u{0}', '\u{0}']), ('\u{18a}', ['\u{257}', '\u{0}', '\u{0}']),
- ('\u{18b}', ['\u{18c}', '\u{0}', '\u{0}']), ('\u{18e}', ['\u{1dd}', '\u{0}', '\u{0}']),
- ('\u{18f}', ['\u{259}', '\u{0}', '\u{0}']), ('\u{190}', ['\u{25b}', '\u{0}', '\u{0}']),
- ('\u{191}', ['\u{192}', '\u{0}', '\u{0}']), ('\u{193}', ['\u{260}', '\u{0}', '\u{0}']),
- ('\u{194}', ['\u{263}', '\u{0}', '\u{0}']), ('\u{196}', ['\u{269}', '\u{0}', '\u{0}']),
- ('\u{197}', ['\u{268}', '\u{0}', '\u{0}']), ('\u{198}', ['\u{199}', '\u{0}', '\u{0}']),
- ('\u{19c}', ['\u{26f}', '\u{0}', '\u{0}']), ('\u{19d}', ['\u{272}', '\u{0}', '\u{0}']),
- ('\u{19f}', ['\u{275}', '\u{0}', '\u{0}']), ('\u{1a0}', ['\u{1a1}', '\u{0}', '\u{0}']),
- ('\u{1a2}', ['\u{1a3}', '\u{0}', '\u{0}']), ('\u{1a4}', ['\u{1a5}', '\u{0}', '\u{0}']),
- ('\u{1a6}', ['\u{280}', '\u{0}', '\u{0}']), ('\u{1a7}', ['\u{1a8}', '\u{0}', '\u{0}']),
- ('\u{1a9}', ['\u{283}', '\u{0}', '\u{0}']), ('\u{1ac}', ['\u{1ad}', '\u{0}', '\u{0}']),
- ('\u{1ae}', ['\u{288}', '\u{0}', '\u{0}']), ('\u{1af}', ['\u{1b0}', '\u{0}', '\u{0}']),
- ('\u{1b1}', ['\u{28a}', '\u{0}', '\u{0}']), ('\u{1b2}', ['\u{28b}', '\u{0}', '\u{0}']),
- ('\u{1b3}', ['\u{1b4}', '\u{0}', '\u{0}']), ('\u{1b5}', ['\u{1b6}', '\u{0}', '\u{0}']),
- ('\u{1b7}', ['\u{292}', '\u{0}', '\u{0}']), ('\u{1b8}', ['\u{1b9}', '\u{0}', '\u{0}']),
- ('\u{1bc}', ['\u{1bd}', '\u{0}', '\u{0}']), ('\u{1c4}', ['\u{1c6}', '\u{0}', '\u{0}']),
- ('\u{1c5}', ['\u{1c6}', '\u{0}', '\u{0}']), ('\u{1c7}', ['\u{1c9}', '\u{0}', '\u{0}']),
- ('\u{1c8}', ['\u{1c9}', '\u{0}', '\u{0}']), ('\u{1ca}', ['\u{1cc}', '\u{0}', '\u{0}']),
- ('\u{1cb}', ['\u{1cc}', '\u{0}', '\u{0}']), ('\u{1cd}', ['\u{1ce}', '\u{0}', '\u{0}']),
- ('\u{1cf}', ['\u{1d0}', '\u{0}', '\u{0}']), ('\u{1d1}', ['\u{1d2}', '\u{0}', '\u{0}']),
- ('\u{1d3}', ['\u{1d4}', '\u{0}', '\u{0}']), ('\u{1d5}', ['\u{1d6}', '\u{0}', '\u{0}']),
- ('\u{1d7}', ['\u{1d8}', '\u{0}', '\u{0}']), ('\u{1d9}', ['\u{1da}', '\u{0}', '\u{0}']),
- ('\u{1db}', ['\u{1dc}', '\u{0}', '\u{0}']), ('\u{1de}', ['\u{1df}', '\u{0}', '\u{0}']),
- ('\u{1e0}', ['\u{1e1}', '\u{0}', '\u{0}']), ('\u{1e2}', ['\u{1e3}', '\u{0}', '\u{0}']),
- ('\u{1e4}', ['\u{1e5}', '\u{0}', '\u{0}']), ('\u{1e6}', ['\u{1e7}', '\u{0}', '\u{0}']),
- ('\u{1e8}', ['\u{1e9}', '\u{0}', '\u{0}']), ('\u{1ea}', ['\u{1eb}', '\u{0}', '\u{0}']),
- ('\u{1ec}', ['\u{1ed}', '\u{0}', '\u{0}']), ('\u{1ee}', ['\u{1ef}', '\u{0}', '\u{0}']),
- ('\u{1f1}', ['\u{1f3}', '\u{0}', '\u{0}']), ('\u{1f2}', ['\u{1f3}', '\u{0}', '\u{0}']),
- ('\u{1f4}', ['\u{1f5}', '\u{0}', '\u{0}']), ('\u{1f6}', ['\u{195}', '\u{0}', '\u{0}']),
- ('\u{1f7}', ['\u{1bf}', '\u{0}', '\u{0}']), ('\u{1f8}', ['\u{1f9}', '\u{0}', '\u{0}']),
- ('\u{1fa}', ['\u{1fb}', '\u{0}', '\u{0}']), ('\u{1fc}', ['\u{1fd}', '\u{0}', '\u{0}']),
- ('\u{1fe}', ['\u{1ff}', '\u{0}', '\u{0}']), ('\u{200}', ['\u{201}', '\u{0}', '\u{0}']),
- ('\u{202}', ['\u{203}', '\u{0}', '\u{0}']), ('\u{204}', ['\u{205}', '\u{0}', '\u{0}']),
- ('\u{206}', ['\u{207}', '\u{0}', '\u{0}']), ('\u{208}', ['\u{209}', '\u{0}', '\u{0}']),
- ('\u{20a}', ['\u{20b}', '\u{0}', '\u{0}']), ('\u{20c}', ['\u{20d}', '\u{0}', '\u{0}']),
- ('\u{20e}', ['\u{20f}', '\u{0}', '\u{0}']), ('\u{210}', ['\u{211}', '\u{0}', '\u{0}']),
- ('\u{212}', ['\u{213}', '\u{0}', '\u{0}']), ('\u{214}', ['\u{215}', '\u{0}', '\u{0}']),
- ('\u{216}', ['\u{217}', '\u{0}', '\u{0}']), ('\u{218}', ['\u{219}', '\u{0}', '\u{0}']),
- ('\u{21a}', ['\u{21b}', '\u{0}', '\u{0}']), ('\u{21c}', ['\u{21d}', '\u{0}', '\u{0}']),
- ('\u{21e}', ['\u{21f}', '\u{0}', '\u{0}']), ('\u{220}', ['\u{19e}', '\u{0}', '\u{0}']),
- ('\u{222}', ['\u{223}', '\u{0}', '\u{0}']), ('\u{224}', ['\u{225}', '\u{0}', '\u{0}']),
- ('\u{226}', ['\u{227}', '\u{0}', '\u{0}']), ('\u{228}', ['\u{229}', '\u{0}', '\u{0}']),
- ('\u{22a}', ['\u{22b}', '\u{0}', '\u{0}']), ('\u{22c}', ['\u{22d}', '\u{0}', '\u{0}']),
- ('\u{22e}', ['\u{22f}', '\u{0}', '\u{0}']), ('\u{230}', ['\u{231}', '\u{0}', '\u{0}']),
- ('\u{232}', ['\u{233}', '\u{0}', '\u{0}']), ('\u{23a}', ['\u{2c65}', '\u{0}', '\u{0}']),
- ('\u{23b}', ['\u{23c}', '\u{0}', '\u{0}']), ('\u{23d}', ['\u{19a}', '\u{0}', '\u{0}']),
- ('\u{23e}', ['\u{2c66}', '\u{0}', '\u{0}']), ('\u{241}', ['\u{242}', '\u{0}', '\u{0}']),
- ('\u{243}', ['\u{180}', '\u{0}', '\u{0}']), ('\u{244}', ['\u{289}', '\u{0}', '\u{0}']),
- ('\u{245}', ['\u{28c}', '\u{0}', '\u{0}']), ('\u{246}', ['\u{247}', '\u{0}', '\u{0}']),
- ('\u{248}', ['\u{249}', '\u{0}', '\u{0}']), ('\u{24a}', ['\u{24b}', '\u{0}', '\u{0}']),
- ('\u{24c}', ['\u{24d}', '\u{0}', '\u{0}']), ('\u{24e}', ['\u{24f}', '\u{0}', '\u{0}']),
- ('\u{370}', ['\u{371}', '\u{0}', '\u{0}']), ('\u{372}', ['\u{373}', '\u{0}', '\u{0}']),
- ('\u{376}', ['\u{377}', '\u{0}', '\u{0}']), ('\u{37f}', ['\u{3f3}', '\u{0}', '\u{0}']),
- ('\u{386}', ['\u{3ac}', '\u{0}', '\u{0}']), ('\u{388}', ['\u{3ad}', '\u{0}', '\u{0}']),
- ('\u{389}', ['\u{3ae}', '\u{0}', '\u{0}']), ('\u{38a}', ['\u{3af}', '\u{0}', '\u{0}']),
- ('\u{38c}', ['\u{3cc}', '\u{0}', '\u{0}']), ('\u{38e}', ['\u{3cd}', '\u{0}', '\u{0}']),
- ('\u{38f}', ['\u{3ce}', '\u{0}', '\u{0}']), ('\u{391}', ['\u{3b1}', '\u{0}', '\u{0}']),
- ('\u{392}', ['\u{3b2}', '\u{0}', '\u{0}']), ('\u{393}', ['\u{3b3}', '\u{0}', '\u{0}']),
- ('\u{394}', ['\u{3b4}', '\u{0}', '\u{0}']), ('\u{395}', ['\u{3b5}', '\u{0}', '\u{0}']),
- ('\u{396}', ['\u{3b6}', '\u{0}', '\u{0}']), ('\u{397}', ['\u{3b7}', '\u{0}', '\u{0}']),
- ('\u{398}', ['\u{3b8}', '\u{0}', '\u{0}']), ('\u{399}', ['\u{3b9}', '\u{0}', '\u{0}']),
- ('\u{39a}', ['\u{3ba}', '\u{0}', '\u{0}']), ('\u{39b}', ['\u{3bb}', '\u{0}', '\u{0}']),
- ('\u{39c}', ['\u{3bc}', '\u{0}', '\u{0}']), ('\u{39d}', ['\u{3bd}', '\u{0}', '\u{0}']),
- ('\u{39e}', ['\u{3be}', '\u{0}', '\u{0}']), ('\u{39f}', ['\u{3bf}', '\u{0}', '\u{0}']),
- ('\u{3a0}', ['\u{3c0}', '\u{0}', '\u{0}']), ('\u{3a1}', ['\u{3c1}', '\u{0}', '\u{0}']),
- ('\u{3a3}', ['\u{3c3}', '\u{0}', '\u{0}']), ('\u{3a4}', ['\u{3c4}', '\u{0}', '\u{0}']),
- ('\u{3a5}', ['\u{3c5}', '\u{0}', '\u{0}']), ('\u{3a6}', ['\u{3c6}', '\u{0}', '\u{0}']),
- ('\u{3a7}', ['\u{3c7}', '\u{0}', '\u{0}']), ('\u{3a8}', ['\u{3c8}', '\u{0}', '\u{0}']),
- ('\u{3a9}', ['\u{3c9}', '\u{0}', '\u{0}']), ('\u{3aa}', ['\u{3ca}', '\u{0}', '\u{0}']),
- ('\u{3ab}', ['\u{3cb}', '\u{0}', '\u{0}']), ('\u{3cf}', ['\u{3d7}', '\u{0}', '\u{0}']),
- ('\u{3d8}', ['\u{3d9}', '\u{0}', '\u{0}']), ('\u{3da}', ['\u{3db}', '\u{0}', '\u{0}']),
- ('\u{3dc}', ['\u{3dd}', '\u{0}', '\u{0}']), ('\u{3de}', ['\u{3df}', '\u{0}', '\u{0}']),
- ('\u{3e0}', ['\u{3e1}', '\u{0}', '\u{0}']), ('\u{3e2}', ['\u{3e3}', '\u{0}', '\u{0}']),
- ('\u{3e4}', ['\u{3e5}', '\u{0}', '\u{0}']), ('\u{3e6}', ['\u{3e7}', '\u{0}', '\u{0}']),
- ('\u{3e8}', ['\u{3e9}', '\u{0}', '\u{0}']), ('\u{3ea}', ['\u{3eb}', '\u{0}', '\u{0}']),
- ('\u{3ec}', ['\u{3ed}', '\u{0}', '\u{0}']), ('\u{3ee}', ['\u{3ef}', '\u{0}', '\u{0}']),
- ('\u{3f4}', ['\u{3b8}', '\u{0}', '\u{0}']), ('\u{3f7}', ['\u{3f8}', '\u{0}', '\u{0}']),
- ('\u{3f9}', ['\u{3f2}', '\u{0}', '\u{0}']), ('\u{3fa}', ['\u{3fb}', '\u{0}', '\u{0}']),
- ('\u{3fd}', ['\u{37b}', '\u{0}', '\u{0}']), ('\u{3fe}', ['\u{37c}', '\u{0}', '\u{0}']),
- ('\u{3ff}', ['\u{37d}', '\u{0}', '\u{0}']), ('\u{400}', ['\u{450}', '\u{0}', '\u{0}']),
- ('\u{401}', ['\u{451}', '\u{0}', '\u{0}']), ('\u{402}', ['\u{452}', '\u{0}', '\u{0}']),
- ('\u{403}', ['\u{453}', '\u{0}', '\u{0}']), ('\u{404}', ['\u{454}', '\u{0}', '\u{0}']),
- ('\u{405}', ['\u{455}', '\u{0}', '\u{0}']), ('\u{406}', ['\u{456}', '\u{0}', '\u{0}']),
- ('\u{407}', ['\u{457}', '\u{0}', '\u{0}']), ('\u{408}', ['\u{458}', '\u{0}', '\u{0}']),
- ('\u{409}', ['\u{459}', '\u{0}', '\u{0}']), ('\u{40a}', ['\u{45a}', '\u{0}', '\u{0}']),
- ('\u{40b}', ['\u{45b}', '\u{0}', '\u{0}']), ('\u{40c}', ['\u{45c}', '\u{0}', '\u{0}']),
- ('\u{40d}', ['\u{45d}', '\u{0}', '\u{0}']), ('\u{40e}', ['\u{45e}', '\u{0}', '\u{0}']),
- ('\u{40f}', ['\u{45f}', '\u{0}', '\u{0}']), ('\u{410}', ['\u{430}', '\u{0}', '\u{0}']),
- ('\u{411}', ['\u{431}', '\u{0}', '\u{0}']), ('\u{412}', ['\u{432}', '\u{0}', '\u{0}']),
- ('\u{413}', ['\u{433}', '\u{0}', '\u{0}']), ('\u{414}', ['\u{434}', '\u{0}', '\u{0}']),
- ('\u{415}', ['\u{435}', '\u{0}', '\u{0}']), ('\u{416}', ['\u{436}', '\u{0}', '\u{0}']),
- ('\u{417}', ['\u{437}', '\u{0}', '\u{0}']), ('\u{418}', ['\u{438}', '\u{0}', '\u{0}']),
- ('\u{419}', ['\u{439}', '\u{0}', '\u{0}']), ('\u{41a}', ['\u{43a}', '\u{0}', '\u{0}']),
- ('\u{41b}', ['\u{43b}', '\u{0}', '\u{0}']), ('\u{41c}', ['\u{43c}', '\u{0}', '\u{0}']),
- ('\u{41d}', ['\u{43d}', '\u{0}', '\u{0}']), ('\u{41e}', ['\u{43e}', '\u{0}', '\u{0}']),
- ('\u{41f}', ['\u{43f}', '\u{0}', '\u{0}']), ('\u{420}', ['\u{440}', '\u{0}', '\u{0}']),
- ('\u{421}', ['\u{441}', '\u{0}', '\u{0}']), ('\u{422}', ['\u{442}', '\u{0}', '\u{0}']),
- ('\u{423}', ['\u{443}', '\u{0}', '\u{0}']), ('\u{424}', ['\u{444}', '\u{0}', '\u{0}']),
- ('\u{425}', ['\u{445}', '\u{0}', '\u{0}']), ('\u{426}', ['\u{446}', '\u{0}', '\u{0}']),
- ('\u{427}', ['\u{447}', '\u{0}', '\u{0}']), ('\u{428}', ['\u{448}', '\u{0}', '\u{0}']),
- ('\u{429}', ['\u{449}', '\u{0}', '\u{0}']), ('\u{42a}', ['\u{44a}', '\u{0}', '\u{0}']),
- ('\u{42b}', ['\u{44b}', '\u{0}', '\u{0}']), ('\u{42c}', ['\u{44c}', '\u{0}', '\u{0}']),
- ('\u{42d}', ['\u{44d}', '\u{0}', '\u{0}']), ('\u{42e}', ['\u{44e}', '\u{0}', '\u{0}']),
- ('\u{42f}', ['\u{44f}', '\u{0}', '\u{0}']), ('\u{460}', ['\u{461}', '\u{0}', '\u{0}']),
- ('\u{462}', ['\u{463}', '\u{0}', '\u{0}']), ('\u{464}', ['\u{465}', '\u{0}', '\u{0}']),
- ('\u{466}', ['\u{467}', '\u{0}', '\u{0}']), ('\u{468}', ['\u{469}', '\u{0}', '\u{0}']),
- ('\u{46a}', ['\u{46b}', '\u{0}', '\u{0}']), ('\u{46c}', ['\u{46d}', '\u{0}', '\u{0}']),
- ('\u{46e}', ['\u{46f}', '\u{0}', '\u{0}']), ('\u{470}', ['\u{471}', '\u{0}', '\u{0}']),
- ('\u{472}', ['\u{473}', '\u{0}', '\u{0}']), ('\u{474}', ['\u{475}', '\u{0}', '\u{0}']),
- ('\u{476}', ['\u{477}', '\u{0}', '\u{0}']), ('\u{478}', ['\u{479}', '\u{0}', '\u{0}']),
- ('\u{47a}', ['\u{47b}', '\u{0}', '\u{0}']), ('\u{47c}', ['\u{47d}', '\u{0}', '\u{0}']),
- ('\u{47e}', ['\u{47f}', '\u{0}', '\u{0}']), ('\u{480}', ['\u{481}', '\u{0}', '\u{0}']),
- ('\u{48a}', ['\u{48b}', '\u{0}', '\u{0}']), ('\u{48c}', ['\u{48d}', '\u{0}', '\u{0}']),
- ('\u{48e}', ['\u{48f}', '\u{0}', '\u{0}']), ('\u{490}', ['\u{491}', '\u{0}', '\u{0}']),
- ('\u{492}', ['\u{493}', '\u{0}', '\u{0}']), ('\u{494}', ['\u{495}', '\u{0}', '\u{0}']),
- ('\u{496}', ['\u{497}', '\u{0}', '\u{0}']), ('\u{498}', ['\u{499}', '\u{0}', '\u{0}']),
- ('\u{49a}', ['\u{49b}', '\u{0}', '\u{0}']), ('\u{49c}', ['\u{49d}', '\u{0}', '\u{0}']),
- ('\u{49e}', ['\u{49f}', '\u{0}', '\u{0}']), ('\u{4a0}', ['\u{4a1}', '\u{0}', '\u{0}']),
- ('\u{4a2}', ['\u{4a3}', '\u{0}', '\u{0}']), ('\u{4a4}', ['\u{4a5}', '\u{0}', '\u{0}']),
- ('\u{4a6}', ['\u{4a7}', '\u{0}', '\u{0}']), ('\u{4a8}', ['\u{4a9}', '\u{0}', '\u{0}']),
- ('\u{4aa}', ['\u{4ab}', '\u{0}', '\u{0}']), ('\u{4ac}', ['\u{4ad}', '\u{0}', '\u{0}']),
- ('\u{4ae}', ['\u{4af}', '\u{0}', '\u{0}']), ('\u{4b0}', ['\u{4b1}', '\u{0}', '\u{0}']),
- ('\u{4b2}', ['\u{4b3}', '\u{0}', '\u{0}']), ('\u{4b4}', ['\u{4b5}', '\u{0}', '\u{0}']),
- ('\u{4b6}', ['\u{4b7}', '\u{0}', '\u{0}']), ('\u{4b8}', ['\u{4b9}', '\u{0}', '\u{0}']),
- ('\u{4ba}', ['\u{4bb}', '\u{0}', '\u{0}']), ('\u{4bc}', ['\u{4bd}', '\u{0}', '\u{0}']),
- ('\u{4be}', ['\u{4bf}', '\u{0}', '\u{0}']), ('\u{4c0}', ['\u{4cf}', '\u{0}', '\u{0}']),
- ('\u{4c1}', ['\u{4c2}', '\u{0}', '\u{0}']), ('\u{4c3}', ['\u{4c4}', '\u{0}', '\u{0}']),
- ('\u{4c5}', ['\u{4c6}', '\u{0}', '\u{0}']), ('\u{4c7}', ['\u{4c8}', '\u{0}', '\u{0}']),
- ('\u{4c9}', ['\u{4ca}', '\u{0}', '\u{0}']), ('\u{4cb}', ['\u{4cc}', '\u{0}', '\u{0}']),
- ('\u{4cd}', ['\u{4ce}', '\u{0}', '\u{0}']), ('\u{4d0}', ['\u{4d1}', '\u{0}', '\u{0}']),
- ('\u{4d2}', ['\u{4d3}', '\u{0}', '\u{0}']), ('\u{4d4}', ['\u{4d5}', '\u{0}', '\u{0}']),
- ('\u{4d6}', ['\u{4d7}', '\u{0}', '\u{0}']), ('\u{4d8}', ['\u{4d9}', '\u{0}', '\u{0}']),
- ('\u{4da}', ['\u{4db}', '\u{0}', '\u{0}']), ('\u{4dc}', ['\u{4dd}', '\u{0}', '\u{0}']),
- ('\u{4de}', ['\u{4df}', '\u{0}', '\u{0}']), ('\u{4e0}', ['\u{4e1}', '\u{0}', '\u{0}']),
- ('\u{4e2}', ['\u{4e3}', '\u{0}', '\u{0}']), ('\u{4e4}', ['\u{4e5}', '\u{0}', '\u{0}']),
- ('\u{4e6}', ['\u{4e7}', '\u{0}', '\u{0}']), ('\u{4e8}', ['\u{4e9}', '\u{0}', '\u{0}']),
- ('\u{4ea}', ['\u{4eb}', '\u{0}', '\u{0}']), ('\u{4ec}', ['\u{4ed}', '\u{0}', '\u{0}']),
- ('\u{4ee}', ['\u{4ef}', '\u{0}', '\u{0}']), ('\u{4f0}', ['\u{4f1}', '\u{0}', '\u{0}']),
- ('\u{4f2}', ['\u{4f3}', '\u{0}', '\u{0}']), ('\u{4f4}', ['\u{4f5}', '\u{0}', '\u{0}']),
- ('\u{4f6}', ['\u{4f7}', '\u{0}', '\u{0}']), ('\u{4f8}', ['\u{4f9}', '\u{0}', '\u{0}']),
- ('\u{4fa}', ['\u{4fb}', '\u{0}', '\u{0}']), ('\u{4fc}', ['\u{4fd}', '\u{0}', '\u{0}']),
- ('\u{4fe}', ['\u{4ff}', '\u{0}', '\u{0}']), ('\u{500}', ['\u{501}', '\u{0}', '\u{0}']),
- ('\u{502}', ['\u{503}', '\u{0}', '\u{0}']), ('\u{504}', ['\u{505}', '\u{0}', '\u{0}']),
- ('\u{506}', ['\u{507}', '\u{0}', '\u{0}']), ('\u{508}', ['\u{509}', '\u{0}', '\u{0}']),
- ('\u{50a}', ['\u{50b}', '\u{0}', '\u{0}']), ('\u{50c}', ['\u{50d}', '\u{0}', '\u{0}']),
- ('\u{50e}', ['\u{50f}', '\u{0}', '\u{0}']), ('\u{510}', ['\u{511}', '\u{0}', '\u{0}']),
- ('\u{512}', ['\u{513}', '\u{0}', '\u{0}']), ('\u{514}', ['\u{515}', '\u{0}', '\u{0}']),
- ('\u{516}', ['\u{517}', '\u{0}', '\u{0}']), ('\u{518}', ['\u{519}', '\u{0}', '\u{0}']),
- ('\u{51a}', ['\u{51b}', '\u{0}', '\u{0}']), ('\u{51c}', ['\u{51d}', '\u{0}', '\u{0}']),
- ('\u{51e}', ['\u{51f}', '\u{0}', '\u{0}']), ('\u{520}', ['\u{521}', '\u{0}', '\u{0}']),
- ('\u{522}', ['\u{523}', '\u{0}', '\u{0}']), ('\u{524}', ['\u{525}', '\u{0}', '\u{0}']),
- ('\u{526}', ['\u{527}', '\u{0}', '\u{0}']), ('\u{528}', ['\u{529}', '\u{0}', '\u{0}']),
- ('\u{52a}', ['\u{52b}', '\u{0}', '\u{0}']), ('\u{52c}', ['\u{52d}', '\u{0}', '\u{0}']),
- ('\u{52e}', ['\u{52f}', '\u{0}', '\u{0}']), ('\u{531}', ['\u{561}', '\u{0}', '\u{0}']),
- ('\u{532}', ['\u{562}', '\u{0}', '\u{0}']), ('\u{533}', ['\u{563}', '\u{0}', '\u{0}']),
- ('\u{534}', ['\u{564}', '\u{0}', '\u{0}']), ('\u{535}', ['\u{565}', '\u{0}', '\u{0}']),
- ('\u{536}', ['\u{566}', '\u{0}', '\u{0}']), ('\u{537}', ['\u{567}', '\u{0}', '\u{0}']),
- ('\u{538}', ['\u{568}', '\u{0}', '\u{0}']), ('\u{539}', ['\u{569}', '\u{0}', '\u{0}']),
- ('\u{53a}', ['\u{56a}', '\u{0}', '\u{0}']), ('\u{53b}', ['\u{56b}', '\u{0}', '\u{0}']),
- ('\u{53c}', ['\u{56c}', '\u{0}', '\u{0}']), ('\u{53d}', ['\u{56d}', '\u{0}', '\u{0}']),
- ('\u{53e}', ['\u{56e}', '\u{0}', '\u{0}']), ('\u{53f}', ['\u{56f}', '\u{0}', '\u{0}']),
- ('\u{540}', ['\u{570}', '\u{0}', '\u{0}']), ('\u{541}', ['\u{571}', '\u{0}', '\u{0}']),
- ('\u{542}', ['\u{572}', '\u{0}', '\u{0}']), ('\u{543}', ['\u{573}', '\u{0}', '\u{0}']),
- ('\u{544}', ['\u{574}', '\u{0}', '\u{0}']), ('\u{545}', ['\u{575}', '\u{0}', '\u{0}']),
- ('\u{546}', ['\u{576}', '\u{0}', '\u{0}']), ('\u{547}', ['\u{577}', '\u{0}', '\u{0}']),
- ('\u{548}', ['\u{578}', '\u{0}', '\u{0}']), ('\u{549}', ['\u{579}', '\u{0}', '\u{0}']),
- ('\u{54a}', ['\u{57a}', '\u{0}', '\u{0}']), ('\u{54b}', ['\u{57b}', '\u{0}', '\u{0}']),
- ('\u{54c}', ['\u{57c}', '\u{0}', '\u{0}']), ('\u{54d}', ['\u{57d}', '\u{0}', '\u{0}']),
- ('\u{54e}', ['\u{57e}', '\u{0}', '\u{0}']), ('\u{54f}', ['\u{57f}', '\u{0}', '\u{0}']),
- ('\u{550}', ['\u{580}', '\u{0}', '\u{0}']), ('\u{551}', ['\u{581}', '\u{0}', '\u{0}']),
- ('\u{552}', ['\u{582}', '\u{0}', '\u{0}']), ('\u{553}', ['\u{583}', '\u{0}', '\u{0}']),
- ('\u{554}', ['\u{584}', '\u{0}', '\u{0}']), ('\u{555}', ['\u{585}', '\u{0}', '\u{0}']),
- ('\u{556}', ['\u{586}', '\u{0}', '\u{0}']), ('\u{10a0}', ['\u{2d00}', '\u{0}', '\u{0}']),
- ('\u{10a1}', ['\u{2d01}', '\u{0}', '\u{0}']), ('\u{10a2}', ['\u{2d02}', '\u{0}', '\u{0}']),
- ('\u{10a3}', ['\u{2d03}', '\u{0}', '\u{0}']), ('\u{10a4}', ['\u{2d04}', '\u{0}', '\u{0}']),
- ('\u{10a5}', ['\u{2d05}', '\u{0}', '\u{0}']), ('\u{10a6}', ['\u{2d06}', '\u{0}', '\u{0}']),
- ('\u{10a7}', ['\u{2d07}', '\u{0}', '\u{0}']), ('\u{10a8}', ['\u{2d08}', '\u{0}', '\u{0}']),
- ('\u{10a9}', ['\u{2d09}', '\u{0}', '\u{0}']), ('\u{10aa}', ['\u{2d0a}', '\u{0}', '\u{0}']),
- ('\u{10ab}', ['\u{2d0b}', '\u{0}', '\u{0}']), ('\u{10ac}', ['\u{2d0c}', '\u{0}', '\u{0}']),
- ('\u{10ad}', ['\u{2d0d}', '\u{0}', '\u{0}']), ('\u{10ae}', ['\u{2d0e}', '\u{0}', '\u{0}']),
- ('\u{10af}', ['\u{2d0f}', '\u{0}', '\u{0}']), ('\u{10b0}', ['\u{2d10}', '\u{0}', '\u{0}']),
- ('\u{10b1}', ['\u{2d11}', '\u{0}', '\u{0}']), ('\u{10b2}', ['\u{2d12}', '\u{0}', '\u{0}']),
- ('\u{10b3}', ['\u{2d13}', '\u{0}', '\u{0}']), ('\u{10b4}', ['\u{2d14}', '\u{0}', '\u{0}']),
- ('\u{10b5}', ['\u{2d15}', '\u{0}', '\u{0}']), ('\u{10b6}', ['\u{2d16}', '\u{0}', '\u{0}']),
- ('\u{10b7}', ['\u{2d17}', '\u{0}', '\u{0}']), ('\u{10b8}', ['\u{2d18}', '\u{0}', '\u{0}']),
- ('\u{10b9}', ['\u{2d19}', '\u{0}', '\u{0}']), ('\u{10ba}', ['\u{2d1a}', '\u{0}', '\u{0}']),
- ('\u{10bb}', ['\u{2d1b}', '\u{0}', '\u{0}']), ('\u{10bc}', ['\u{2d1c}', '\u{0}', '\u{0}']),
- ('\u{10bd}', ['\u{2d1d}', '\u{0}', '\u{0}']), ('\u{10be}', ['\u{2d1e}', '\u{0}', '\u{0}']),
- ('\u{10bf}', ['\u{2d1f}', '\u{0}', '\u{0}']), ('\u{10c0}', ['\u{2d20}', '\u{0}', '\u{0}']),
- ('\u{10c1}', ['\u{2d21}', '\u{0}', '\u{0}']), ('\u{10c2}', ['\u{2d22}', '\u{0}', '\u{0}']),
- ('\u{10c3}', ['\u{2d23}', '\u{0}', '\u{0}']), ('\u{10c4}', ['\u{2d24}', '\u{0}', '\u{0}']),
- ('\u{10c5}', ['\u{2d25}', '\u{0}', '\u{0}']), ('\u{10c7}', ['\u{2d27}', '\u{0}', '\u{0}']),
- ('\u{10cd}', ['\u{2d2d}', '\u{0}', '\u{0}']), ('\u{13a0}', ['\u{ab70}', '\u{0}', '\u{0}']),
- ('\u{13a1}', ['\u{ab71}', '\u{0}', '\u{0}']), ('\u{13a2}', ['\u{ab72}', '\u{0}', '\u{0}']),
- ('\u{13a3}', ['\u{ab73}', '\u{0}', '\u{0}']), ('\u{13a4}', ['\u{ab74}', '\u{0}', '\u{0}']),
- ('\u{13a5}', ['\u{ab75}', '\u{0}', '\u{0}']), ('\u{13a6}', ['\u{ab76}', '\u{0}', '\u{0}']),
- ('\u{13a7}', ['\u{ab77}', '\u{0}', '\u{0}']), ('\u{13a8}', ['\u{ab78}', '\u{0}', '\u{0}']),
- ('\u{13a9}', ['\u{ab79}', '\u{0}', '\u{0}']), ('\u{13aa}', ['\u{ab7a}', '\u{0}', '\u{0}']),
- ('\u{13ab}', ['\u{ab7b}', '\u{0}', '\u{0}']), ('\u{13ac}', ['\u{ab7c}', '\u{0}', '\u{0}']),
- ('\u{13ad}', ['\u{ab7d}', '\u{0}', '\u{0}']), ('\u{13ae}', ['\u{ab7e}', '\u{0}', '\u{0}']),
- ('\u{13af}', ['\u{ab7f}', '\u{0}', '\u{0}']), ('\u{13b0}', ['\u{ab80}', '\u{0}', '\u{0}']),
- ('\u{13b1}', ['\u{ab81}', '\u{0}', '\u{0}']), ('\u{13b2}', ['\u{ab82}', '\u{0}', '\u{0}']),
- ('\u{13b3}', ['\u{ab83}', '\u{0}', '\u{0}']), ('\u{13b4}', ['\u{ab84}', '\u{0}', '\u{0}']),
- ('\u{13b5}', ['\u{ab85}', '\u{0}', '\u{0}']), ('\u{13b6}', ['\u{ab86}', '\u{0}', '\u{0}']),
- ('\u{13b7}', ['\u{ab87}', '\u{0}', '\u{0}']), ('\u{13b8}', ['\u{ab88}', '\u{0}', '\u{0}']),
- ('\u{13b9}', ['\u{ab89}', '\u{0}', '\u{0}']), ('\u{13ba}', ['\u{ab8a}', '\u{0}', '\u{0}']),
- ('\u{13bb}', ['\u{ab8b}', '\u{0}', '\u{0}']), ('\u{13bc}', ['\u{ab8c}', '\u{0}', '\u{0}']),
- ('\u{13bd}', ['\u{ab8d}', '\u{0}', '\u{0}']), ('\u{13be}', ['\u{ab8e}', '\u{0}', '\u{0}']),
- ('\u{13bf}', ['\u{ab8f}', '\u{0}', '\u{0}']), ('\u{13c0}', ['\u{ab90}', '\u{0}', '\u{0}']),
- ('\u{13c1}', ['\u{ab91}', '\u{0}', '\u{0}']), ('\u{13c2}', ['\u{ab92}', '\u{0}', '\u{0}']),
- ('\u{13c3}', ['\u{ab93}', '\u{0}', '\u{0}']), ('\u{13c4}', ['\u{ab94}', '\u{0}', '\u{0}']),
- ('\u{13c5}', ['\u{ab95}', '\u{0}', '\u{0}']), ('\u{13c6}', ['\u{ab96}', '\u{0}', '\u{0}']),
- ('\u{13c7}', ['\u{ab97}', '\u{0}', '\u{0}']), ('\u{13c8}', ['\u{ab98}', '\u{0}', '\u{0}']),
- ('\u{13c9}', ['\u{ab99}', '\u{0}', '\u{0}']), ('\u{13ca}', ['\u{ab9a}', '\u{0}', '\u{0}']),
- ('\u{13cb}', ['\u{ab9b}', '\u{0}', '\u{0}']), ('\u{13cc}', ['\u{ab9c}', '\u{0}', '\u{0}']),
- ('\u{13cd}', ['\u{ab9d}', '\u{0}', '\u{0}']), ('\u{13ce}', ['\u{ab9e}', '\u{0}', '\u{0}']),
- ('\u{13cf}', ['\u{ab9f}', '\u{0}', '\u{0}']), ('\u{13d0}', ['\u{aba0}', '\u{0}', '\u{0}']),
- ('\u{13d1}', ['\u{aba1}', '\u{0}', '\u{0}']), ('\u{13d2}', ['\u{aba2}', '\u{0}', '\u{0}']),
- ('\u{13d3}', ['\u{aba3}', '\u{0}', '\u{0}']), ('\u{13d4}', ['\u{aba4}', '\u{0}', '\u{0}']),
- ('\u{13d5}', ['\u{aba5}', '\u{0}', '\u{0}']), ('\u{13d6}', ['\u{aba6}', '\u{0}', '\u{0}']),
- ('\u{13d7}', ['\u{aba7}', '\u{0}', '\u{0}']), ('\u{13d8}', ['\u{aba8}', '\u{0}', '\u{0}']),
- ('\u{13d9}', ['\u{aba9}', '\u{0}', '\u{0}']), ('\u{13da}', ['\u{abaa}', '\u{0}', '\u{0}']),
- ('\u{13db}', ['\u{abab}', '\u{0}', '\u{0}']), ('\u{13dc}', ['\u{abac}', '\u{0}', '\u{0}']),
- ('\u{13dd}', ['\u{abad}', '\u{0}', '\u{0}']), ('\u{13de}', ['\u{abae}', '\u{0}', '\u{0}']),
- ('\u{13df}', ['\u{abaf}', '\u{0}', '\u{0}']), ('\u{13e0}', ['\u{abb0}', '\u{0}', '\u{0}']),
- ('\u{13e1}', ['\u{abb1}', '\u{0}', '\u{0}']), ('\u{13e2}', ['\u{abb2}', '\u{0}', '\u{0}']),
- ('\u{13e3}', ['\u{abb3}', '\u{0}', '\u{0}']), ('\u{13e4}', ['\u{abb4}', '\u{0}', '\u{0}']),
- ('\u{13e5}', ['\u{abb5}', '\u{0}', '\u{0}']), ('\u{13e6}', ['\u{abb6}', '\u{0}', '\u{0}']),
- ('\u{13e7}', ['\u{abb7}', '\u{0}', '\u{0}']), ('\u{13e8}', ['\u{abb8}', '\u{0}', '\u{0}']),
- ('\u{13e9}', ['\u{abb9}', '\u{0}', '\u{0}']), ('\u{13ea}', ['\u{abba}', '\u{0}', '\u{0}']),
- ('\u{13eb}', ['\u{abbb}', '\u{0}', '\u{0}']), ('\u{13ec}', ['\u{abbc}', '\u{0}', '\u{0}']),
- ('\u{13ed}', ['\u{abbd}', '\u{0}', '\u{0}']), ('\u{13ee}', ['\u{abbe}', '\u{0}', '\u{0}']),
- ('\u{13ef}', ['\u{abbf}', '\u{0}', '\u{0}']), ('\u{13f0}', ['\u{13f8}', '\u{0}', '\u{0}']),
- ('\u{13f1}', ['\u{13f9}', '\u{0}', '\u{0}']), ('\u{13f2}', ['\u{13fa}', '\u{0}', '\u{0}']),
- ('\u{13f3}', ['\u{13fb}', '\u{0}', '\u{0}']), ('\u{13f4}', ['\u{13fc}', '\u{0}', '\u{0}']),
- ('\u{13f5}', ['\u{13fd}', '\u{0}', '\u{0}']), ('\u{1c90}', ['\u{10d0}', '\u{0}', '\u{0}']),
- ('\u{1c91}', ['\u{10d1}', '\u{0}', '\u{0}']), ('\u{1c92}', ['\u{10d2}', '\u{0}', '\u{0}']),
- ('\u{1c93}', ['\u{10d3}', '\u{0}', '\u{0}']), ('\u{1c94}', ['\u{10d4}', '\u{0}', '\u{0}']),
- ('\u{1c95}', ['\u{10d5}', '\u{0}', '\u{0}']), ('\u{1c96}', ['\u{10d6}', '\u{0}', '\u{0}']),
- ('\u{1c97}', ['\u{10d7}', '\u{0}', '\u{0}']), ('\u{1c98}', ['\u{10d8}', '\u{0}', '\u{0}']),
- ('\u{1c99}', ['\u{10d9}', '\u{0}', '\u{0}']), ('\u{1c9a}', ['\u{10da}', '\u{0}', '\u{0}']),
- ('\u{1c9b}', ['\u{10db}', '\u{0}', '\u{0}']), ('\u{1c9c}', ['\u{10dc}', '\u{0}', '\u{0}']),
- ('\u{1c9d}', ['\u{10dd}', '\u{0}', '\u{0}']), ('\u{1c9e}', ['\u{10de}', '\u{0}', '\u{0}']),
- ('\u{1c9f}', ['\u{10df}', '\u{0}', '\u{0}']), ('\u{1ca0}', ['\u{10e0}', '\u{0}', '\u{0}']),
- ('\u{1ca1}', ['\u{10e1}', '\u{0}', '\u{0}']), ('\u{1ca2}', ['\u{10e2}', '\u{0}', '\u{0}']),
- ('\u{1ca3}', ['\u{10e3}', '\u{0}', '\u{0}']), ('\u{1ca4}', ['\u{10e4}', '\u{0}', '\u{0}']),
- ('\u{1ca5}', ['\u{10e5}', '\u{0}', '\u{0}']), ('\u{1ca6}', ['\u{10e6}', '\u{0}', '\u{0}']),
- ('\u{1ca7}', ['\u{10e7}', '\u{0}', '\u{0}']), ('\u{1ca8}', ['\u{10e8}', '\u{0}', '\u{0}']),
- ('\u{1ca9}', ['\u{10e9}', '\u{0}', '\u{0}']), ('\u{1caa}', ['\u{10ea}', '\u{0}', '\u{0}']),
- ('\u{1cab}', ['\u{10eb}', '\u{0}', '\u{0}']), ('\u{1cac}', ['\u{10ec}', '\u{0}', '\u{0}']),
- ('\u{1cad}', ['\u{10ed}', '\u{0}', '\u{0}']), ('\u{1cae}', ['\u{10ee}', '\u{0}', '\u{0}']),
- ('\u{1caf}', ['\u{10ef}', '\u{0}', '\u{0}']), ('\u{1cb0}', ['\u{10f0}', '\u{0}', '\u{0}']),
- ('\u{1cb1}', ['\u{10f1}', '\u{0}', '\u{0}']), ('\u{1cb2}', ['\u{10f2}', '\u{0}', '\u{0}']),
- ('\u{1cb3}', ['\u{10f3}', '\u{0}', '\u{0}']), ('\u{1cb4}', ['\u{10f4}', '\u{0}', '\u{0}']),
- ('\u{1cb5}', ['\u{10f5}', '\u{0}', '\u{0}']), ('\u{1cb6}', ['\u{10f6}', '\u{0}', '\u{0}']),
- ('\u{1cb7}', ['\u{10f7}', '\u{0}', '\u{0}']), ('\u{1cb8}', ['\u{10f8}', '\u{0}', '\u{0}']),
- ('\u{1cb9}', ['\u{10f9}', '\u{0}', '\u{0}']), ('\u{1cba}', ['\u{10fa}', '\u{0}', '\u{0}']),
- ('\u{1cbd}', ['\u{10fd}', '\u{0}', '\u{0}']), ('\u{1cbe}', ['\u{10fe}', '\u{0}', '\u{0}']),
- ('\u{1cbf}', ['\u{10ff}', '\u{0}', '\u{0}']), ('\u{1e00}', ['\u{1e01}', '\u{0}', '\u{0}']),
- ('\u{1e02}', ['\u{1e03}', '\u{0}', '\u{0}']), ('\u{1e04}', ['\u{1e05}', '\u{0}', '\u{0}']),
- ('\u{1e06}', ['\u{1e07}', '\u{0}', '\u{0}']), ('\u{1e08}', ['\u{1e09}', '\u{0}', '\u{0}']),
- ('\u{1e0a}', ['\u{1e0b}', '\u{0}', '\u{0}']), ('\u{1e0c}', ['\u{1e0d}', '\u{0}', '\u{0}']),
- ('\u{1e0e}', ['\u{1e0f}', '\u{0}', '\u{0}']), ('\u{1e10}', ['\u{1e11}', '\u{0}', '\u{0}']),
- ('\u{1e12}', ['\u{1e13}', '\u{0}', '\u{0}']), ('\u{1e14}', ['\u{1e15}', '\u{0}', '\u{0}']),
- ('\u{1e16}', ['\u{1e17}', '\u{0}', '\u{0}']), ('\u{1e18}', ['\u{1e19}', '\u{0}', '\u{0}']),
- ('\u{1e1a}', ['\u{1e1b}', '\u{0}', '\u{0}']), ('\u{1e1c}', ['\u{1e1d}', '\u{0}', '\u{0}']),
- ('\u{1e1e}', ['\u{1e1f}', '\u{0}', '\u{0}']), ('\u{1e20}', ['\u{1e21}', '\u{0}', '\u{0}']),
- ('\u{1e22}', ['\u{1e23}', '\u{0}', '\u{0}']), ('\u{1e24}', ['\u{1e25}', '\u{0}', '\u{0}']),
- ('\u{1e26}', ['\u{1e27}', '\u{0}', '\u{0}']), ('\u{1e28}', ['\u{1e29}', '\u{0}', '\u{0}']),
- ('\u{1e2a}', ['\u{1e2b}', '\u{0}', '\u{0}']), ('\u{1e2c}', ['\u{1e2d}', '\u{0}', '\u{0}']),
- ('\u{1e2e}', ['\u{1e2f}', '\u{0}', '\u{0}']), ('\u{1e30}', ['\u{1e31}', '\u{0}', '\u{0}']),
- ('\u{1e32}', ['\u{1e33}', '\u{0}', '\u{0}']), ('\u{1e34}', ['\u{1e35}', '\u{0}', '\u{0}']),
- ('\u{1e36}', ['\u{1e37}', '\u{0}', '\u{0}']), ('\u{1e38}', ['\u{1e39}', '\u{0}', '\u{0}']),
- ('\u{1e3a}', ['\u{1e3b}', '\u{0}', '\u{0}']), ('\u{1e3c}', ['\u{1e3d}', '\u{0}', '\u{0}']),
- ('\u{1e3e}', ['\u{1e3f}', '\u{0}', '\u{0}']), ('\u{1e40}', ['\u{1e41}', '\u{0}', '\u{0}']),
- ('\u{1e42}', ['\u{1e43}', '\u{0}', '\u{0}']), ('\u{1e44}', ['\u{1e45}', '\u{0}', '\u{0}']),
- ('\u{1e46}', ['\u{1e47}', '\u{0}', '\u{0}']), ('\u{1e48}', ['\u{1e49}', '\u{0}', '\u{0}']),
- ('\u{1e4a}', ['\u{1e4b}', '\u{0}', '\u{0}']), ('\u{1e4c}', ['\u{1e4d}', '\u{0}', '\u{0}']),
- ('\u{1e4e}', ['\u{1e4f}', '\u{0}', '\u{0}']), ('\u{1e50}', ['\u{1e51}', '\u{0}', '\u{0}']),
- ('\u{1e52}', ['\u{1e53}', '\u{0}', '\u{0}']), ('\u{1e54}', ['\u{1e55}', '\u{0}', '\u{0}']),
- ('\u{1e56}', ['\u{1e57}', '\u{0}', '\u{0}']), ('\u{1e58}', ['\u{1e59}', '\u{0}', '\u{0}']),
- ('\u{1e5a}', ['\u{1e5b}', '\u{0}', '\u{0}']), ('\u{1e5c}', ['\u{1e5d}', '\u{0}', '\u{0}']),
- ('\u{1e5e}', ['\u{1e5f}', '\u{0}', '\u{0}']), ('\u{1e60}', ['\u{1e61}', '\u{0}', '\u{0}']),
- ('\u{1e62}', ['\u{1e63}', '\u{0}', '\u{0}']), ('\u{1e64}', ['\u{1e65}', '\u{0}', '\u{0}']),
- ('\u{1e66}', ['\u{1e67}', '\u{0}', '\u{0}']), ('\u{1e68}', ['\u{1e69}', '\u{0}', '\u{0}']),
- ('\u{1e6a}', ['\u{1e6b}', '\u{0}', '\u{0}']), ('\u{1e6c}', ['\u{1e6d}', '\u{0}', '\u{0}']),
- ('\u{1e6e}', ['\u{1e6f}', '\u{0}', '\u{0}']), ('\u{1e70}', ['\u{1e71}', '\u{0}', '\u{0}']),
- ('\u{1e72}', ['\u{1e73}', '\u{0}', '\u{0}']), ('\u{1e74}', ['\u{1e75}', '\u{0}', '\u{0}']),
- ('\u{1e76}', ['\u{1e77}', '\u{0}', '\u{0}']), ('\u{1e78}', ['\u{1e79}', '\u{0}', '\u{0}']),
- ('\u{1e7a}', ['\u{1e7b}', '\u{0}', '\u{0}']), ('\u{1e7c}', ['\u{1e7d}', '\u{0}', '\u{0}']),
- ('\u{1e7e}', ['\u{1e7f}', '\u{0}', '\u{0}']), ('\u{1e80}', ['\u{1e81}', '\u{0}', '\u{0}']),
- ('\u{1e82}', ['\u{1e83}', '\u{0}', '\u{0}']), ('\u{1e84}', ['\u{1e85}', '\u{0}', '\u{0}']),
- ('\u{1e86}', ['\u{1e87}', '\u{0}', '\u{0}']), ('\u{1e88}', ['\u{1e89}', '\u{0}', '\u{0}']),
- ('\u{1e8a}', ['\u{1e8b}', '\u{0}', '\u{0}']), ('\u{1e8c}', ['\u{1e8d}', '\u{0}', '\u{0}']),
- ('\u{1e8e}', ['\u{1e8f}', '\u{0}', '\u{0}']), ('\u{1e90}', ['\u{1e91}', '\u{0}', '\u{0}']),
- ('\u{1e92}', ['\u{1e93}', '\u{0}', '\u{0}']), ('\u{1e94}', ['\u{1e95}', '\u{0}', '\u{0}']),
- ('\u{1e9e}', ['\u{df}', '\u{0}', '\u{0}']), ('\u{1ea0}', ['\u{1ea1}', '\u{0}', '\u{0}']),
- ('\u{1ea2}', ['\u{1ea3}', '\u{0}', '\u{0}']), ('\u{1ea4}', ['\u{1ea5}', '\u{0}', '\u{0}']),
- ('\u{1ea6}', ['\u{1ea7}', '\u{0}', '\u{0}']), ('\u{1ea8}', ['\u{1ea9}', '\u{0}', '\u{0}']),
- ('\u{1eaa}', ['\u{1eab}', '\u{0}', '\u{0}']), ('\u{1eac}', ['\u{1ead}', '\u{0}', '\u{0}']),
- ('\u{1eae}', ['\u{1eaf}', '\u{0}', '\u{0}']), ('\u{1eb0}', ['\u{1eb1}', '\u{0}', '\u{0}']),
- ('\u{1eb2}', ['\u{1eb3}', '\u{0}', '\u{0}']), ('\u{1eb4}', ['\u{1eb5}', '\u{0}', '\u{0}']),
- ('\u{1eb6}', ['\u{1eb7}', '\u{0}', '\u{0}']), ('\u{1eb8}', ['\u{1eb9}', '\u{0}', '\u{0}']),
- ('\u{1eba}', ['\u{1ebb}', '\u{0}', '\u{0}']), ('\u{1ebc}', ['\u{1ebd}', '\u{0}', '\u{0}']),
- ('\u{1ebe}', ['\u{1ebf}', '\u{0}', '\u{0}']), ('\u{1ec0}', ['\u{1ec1}', '\u{0}', '\u{0}']),
- ('\u{1ec2}', ['\u{1ec3}', '\u{0}', '\u{0}']), ('\u{1ec4}', ['\u{1ec5}', '\u{0}', '\u{0}']),
- ('\u{1ec6}', ['\u{1ec7}', '\u{0}', '\u{0}']), ('\u{1ec8}', ['\u{1ec9}', '\u{0}', '\u{0}']),
- ('\u{1eca}', ['\u{1ecb}', '\u{0}', '\u{0}']), ('\u{1ecc}', ['\u{1ecd}', '\u{0}', '\u{0}']),
- ('\u{1ece}', ['\u{1ecf}', '\u{0}', '\u{0}']), ('\u{1ed0}', ['\u{1ed1}', '\u{0}', '\u{0}']),
- ('\u{1ed2}', ['\u{1ed3}', '\u{0}', '\u{0}']), ('\u{1ed4}', ['\u{1ed5}', '\u{0}', '\u{0}']),
- ('\u{1ed6}', ['\u{1ed7}', '\u{0}', '\u{0}']), ('\u{1ed8}', ['\u{1ed9}', '\u{0}', '\u{0}']),
- ('\u{1eda}', ['\u{1edb}', '\u{0}', '\u{0}']), ('\u{1edc}', ['\u{1edd}', '\u{0}', '\u{0}']),
- ('\u{1ede}', ['\u{1edf}', '\u{0}', '\u{0}']), ('\u{1ee0}', ['\u{1ee1}', '\u{0}', '\u{0}']),
- ('\u{1ee2}', ['\u{1ee3}', '\u{0}', '\u{0}']), ('\u{1ee4}', ['\u{1ee5}', '\u{0}', '\u{0}']),
- ('\u{1ee6}', ['\u{1ee7}', '\u{0}', '\u{0}']), ('\u{1ee8}', ['\u{1ee9}', '\u{0}', '\u{0}']),
- ('\u{1eea}', ['\u{1eeb}', '\u{0}', '\u{0}']), ('\u{1eec}', ['\u{1eed}', '\u{0}', '\u{0}']),
- ('\u{1eee}', ['\u{1eef}', '\u{0}', '\u{0}']), ('\u{1ef0}', ['\u{1ef1}', '\u{0}', '\u{0}']),
- ('\u{1ef2}', ['\u{1ef3}', '\u{0}', '\u{0}']), ('\u{1ef4}', ['\u{1ef5}', '\u{0}', '\u{0}']),
- ('\u{1ef6}', ['\u{1ef7}', '\u{0}', '\u{0}']), ('\u{1ef8}', ['\u{1ef9}', '\u{0}', '\u{0}']),
- ('\u{1efa}', ['\u{1efb}', '\u{0}', '\u{0}']), ('\u{1efc}', ['\u{1efd}', '\u{0}', '\u{0}']),
- ('\u{1efe}', ['\u{1eff}', '\u{0}', '\u{0}']), ('\u{1f08}', ['\u{1f00}', '\u{0}', '\u{0}']),
- ('\u{1f09}', ['\u{1f01}', '\u{0}', '\u{0}']), ('\u{1f0a}', ['\u{1f02}', '\u{0}', '\u{0}']),
- ('\u{1f0b}', ['\u{1f03}', '\u{0}', '\u{0}']), ('\u{1f0c}', ['\u{1f04}', '\u{0}', '\u{0}']),
- ('\u{1f0d}', ['\u{1f05}', '\u{0}', '\u{0}']), ('\u{1f0e}', ['\u{1f06}', '\u{0}', '\u{0}']),
- ('\u{1f0f}', ['\u{1f07}', '\u{0}', '\u{0}']), ('\u{1f18}', ['\u{1f10}', '\u{0}', '\u{0}']),
- ('\u{1f19}', ['\u{1f11}', '\u{0}', '\u{0}']), ('\u{1f1a}', ['\u{1f12}', '\u{0}', '\u{0}']),
- ('\u{1f1b}', ['\u{1f13}', '\u{0}', '\u{0}']), ('\u{1f1c}', ['\u{1f14}', '\u{0}', '\u{0}']),
- ('\u{1f1d}', ['\u{1f15}', '\u{0}', '\u{0}']), ('\u{1f28}', ['\u{1f20}', '\u{0}', '\u{0}']),
- ('\u{1f29}', ['\u{1f21}', '\u{0}', '\u{0}']), ('\u{1f2a}', ['\u{1f22}', '\u{0}', '\u{0}']),
- ('\u{1f2b}', ['\u{1f23}', '\u{0}', '\u{0}']), ('\u{1f2c}', ['\u{1f24}', '\u{0}', '\u{0}']),
- ('\u{1f2d}', ['\u{1f25}', '\u{0}', '\u{0}']), ('\u{1f2e}', ['\u{1f26}', '\u{0}', '\u{0}']),
- ('\u{1f2f}', ['\u{1f27}', '\u{0}', '\u{0}']), ('\u{1f38}', ['\u{1f30}', '\u{0}', '\u{0}']),
- ('\u{1f39}', ['\u{1f31}', '\u{0}', '\u{0}']), ('\u{1f3a}', ['\u{1f32}', '\u{0}', '\u{0}']),
- ('\u{1f3b}', ['\u{1f33}', '\u{0}', '\u{0}']), ('\u{1f3c}', ['\u{1f34}', '\u{0}', '\u{0}']),
- ('\u{1f3d}', ['\u{1f35}', '\u{0}', '\u{0}']), ('\u{1f3e}', ['\u{1f36}', '\u{0}', '\u{0}']),
- ('\u{1f3f}', ['\u{1f37}', '\u{0}', '\u{0}']), ('\u{1f48}', ['\u{1f40}', '\u{0}', '\u{0}']),
- ('\u{1f49}', ['\u{1f41}', '\u{0}', '\u{0}']), ('\u{1f4a}', ['\u{1f42}', '\u{0}', '\u{0}']),
- ('\u{1f4b}', ['\u{1f43}', '\u{0}', '\u{0}']), ('\u{1f4c}', ['\u{1f44}', '\u{0}', '\u{0}']),
- ('\u{1f4d}', ['\u{1f45}', '\u{0}', '\u{0}']), ('\u{1f59}', ['\u{1f51}', '\u{0}', '\u{0}']),
- ('\u{1f5b}', ['\u{1f53}', '\u{0}', '\u{0}']), ('\u{1f5d}', ['\u{1f55}', '\u{0}', '\u{0}']),
- ('\u{1f5f}', ['\u{1f57}', '\u{0}', '\u{0}']), ('\u{1f68}', ['\u{1f60}', '\u{0}', '\u{0}']),
- ('\u{1f69}', ['\u{1f61}', '\u{0}', '\u{0}']), ('\u{1f6a}', ['\u{1f62}', '\u{0}', '\u{0}']),
- ('\u{1f6b}', ['\u{1f63}', '\u{0}', '\u{0}']), ('\u{1f6c}', ['\u{1f64}', '\u{0}', '\u{0}']),
- ('\u{1f6d}', ['\u{1f65}', '\u{0}', '\u{0}']), ('\u{1f6e}', ['\u{1f66}', '\u{0}', '\u{0}']),
- ('\u{1f6f}', ['\u{1f67}', '\u{0}', '\u{0}']), ('\u{1f88}', ['\u{1f80}', '\u{0}', '\u{0}']),
- ('\u{1f89}', ['\u{1f81}', '\u{0}', '\u{0}']), ('\u{1f8a}', ['\u{1f82}', '\u{0}', '\u{0}']),
- ('\u{1f8b}', ['\u{1f83}', '\u{0}', '\u{0}']), ('\u{1f8c}', ['\u{1f84}', '\u{0}', '\u{0}']),
- ('\u{1f8d}', ['\u{1f85}', '\u{0}', '\u{0}']), ('\u{1f8e}', ['\u{1f86}', '\u{0}', '\u{0}']),
- ('\u{1f8f}', ['\u{1f87}', '\u{0}', '\u{0}']), ('\u{1f98}', ['\u{1f90}', '\u{0}', '\u{0}']),
- ('\u{1f99}', ['\u{1f91}', '\u{0}', '\u{0}']), ('\u{1f9a}', ['\u{1f92}', '\u{0}', '\u{0}']),
- ('\u{1f9b}', ['\u{1f93}', '\u{0}', '\u{0}']), ('\u{1f9c}', ['\u{1f94}', '\u{0}', '\u{0}']),
- ('\u{1f9d}', ['\u{1f95}', '\u{0}', '\u{0}']), ('\u{1f9e}', ['\u{1f96}', '\u{0}', '\u{0}']),
- ('\u{1f9f}', ['\u{1f97}', '\u{0}', '\u{0}']), ('\u{1fa8}', ['\u{1fa0}', '\u{0}', '\u{0}']),
- ('\u{1fa9}', ['\u{1fa1}', '\u{0}', '\u{0}']), ('\u{1faa}', ['\u{1fa2}', '\u{0}', '\u{0}']),
- ('\u{1fab}', ['\u{1fa3}', '\u{0}', '\u{0}']), ('\u{1fac}', ['\u{1fa4}', '\u{0}', '\u{0}']),
- ('\u{1fad}', ['\u{1fa5}', '\u{0}', '\u{0}']), ('\u{1fae}', ['\u{1fa6}', '\u{0}', '\u{0}']),
- ('\u{1faf}', ['\u{1fa7}', '\u{0}', '\u{0}']), ('\u{1fb8}', ['\u{1fb0}', '\u{0}', '\u{0}']),
- ('\u{1fb9}', ['\u{1fb1}', '\u{0}', '\u{0}']), ('\u{1fba}', ['\u{1f70}', '\u{0}', '\u{0}']),
- ('\u{1fbb}', ['\u{1f71}', '\u{0}', '\u{0}']), ('\u{1fbc}', ['\u{1fb3}', '\u{0}', '\u{0}']),
- ('\u{1fc8}', ['\u{1f72}', '\u{0}', '\u{0}']), ('\u{1fc9}', ['\u{1f73}', '\u{0}', '\u{0}']),
- ('\u{1fca}', ['\u{1f74}', '\u{0}', '\u{0}']), ('\u{1fcb}', ['\u{1f75}', '\u{0}', '\u{0}']),
- ('\u{1fcc}', ['\u{1fc3}', '\u{0}', '\u{0}']), ('\u{1fd8}', ['\u{1fd0}', '\u{0}', '\u{0}']),
- ('\u{1fd9}', ['\u{1fd1}', '\u{0}', '\u{0}']), ('\u{1fda}', ['\u{1f76}', '\u{0}', '\u{0}']),
- ('\u{1fdb}', ['\u{1f77}', '\u{0}', '\u{0}']), ('\u{1fe8}', ['\u{1fe0}', '\u{0}', '\u{0}']),
- ('\u{1fe9}', ['\u{1fe1}', '\u{0}', '\u{0}']), ('\u{1fea}', ['\u{1f7a}', '\u{0}', '\u{0}']),
- ('\u{1feb}', ['\u{1f7b}', '\u{0}', '\u{0}']), ('\u{1fec}', ['\u{1fe5}', '\u{0}', '\u{0}']),
- ('\u{1ff8}', ['\u{1f78}', '\u{0}', '\u{0}']), ('\u{1ff9}', ['\u{1f79}', '\u{0}', '\u{0}']),
- ('\u{1ffa}', ['\u{1f7c}', '\u{0}', '\u{0}']), ('\u{1ffb}', ['\u{1f7d}', '\u{0}', '\u{0}']),
- ('\u{1ffc}', ['\u{1ff3}', '\u{0}', '\u{0}']), ('\u{2126}', ['\u{3c9}', '\u{0}', '\u{0}']),
- ('\u{212a}', ['k', '\u{0}', '\u{0}']), ('\u{212b}', ['\u{e5}', '\u{0}', '\u{0}']),
- ('\u{2132}', ['\u{214e}', '\u{0}', '\u{0}']), ('\u{2160}', ['\u{2170}', '\u{0}', '\u{0}']),
- ('\u{2161}', ['\u{2171}', '\u{0}', '\u{0}']), ('\u{2162}', ['\u{2172}', '\u{0}', '\u{0}']),
- ('\u{2163}', ['\u{2173}', '\u{0}', '\u{0}']), ('\u{2164}', ['\u{2174}', '\u{0}', '\u{0}']),
- ('\u{2165}', ['\u{2175}', '\u{0}', '\u{0}']), ('\u{2166}', ['\u{2176}', '\u{0}', '\u{0}']),
- ('\u{2167}', ['\u{2177}', '\u{0}', '\u{0}']), ('\u{2168}', ['\u{2178}', '\u{0}', '\u{0}']),
- ('\u{2169}', ['\u{2179}', '\u{0}', '\u{0}']), ('\u{216a}', ['\u{217a}', '\u{0}', '\u{0}']),
- ('\u{216b}', ['\u{217b}', '\u{0}', '\u{0}']), ('\u{216c}', ['\u{217c}', '\u{0}', '\u{0}']),
- ('\u{216d}', ['\u{217d}', '\u{0}', '\u{0}']), ('\u{216e}', ['\u{217e}', '\u{0}', '\u{0}']),
- ('\u{216f}', ['\u{217f}', '\u{0}', '\u{0}']), ('\u{2183}', ['\u{2184}', '\u{0}', '\u{0}']),
- ('\u{24b6}', ['\u{24d0}', '\u{0}', '\u{0}']), ('\u{24b7}', ['\u{24d1}', '\u{0}', '\u{0}']),
- ('\u{24b8}', ['\u{24d2}', '\u{0}', '\u{0}']), ('\u{24b9}', ['\u{24d3}', '\u{0}', '\u{0}']),
- ('\u{24ba}', ['\u{24d4}', '\u{0}', '\u{0}']), ('\u{24bb}', ['\u{24d5}', '\u{0}', '\u{0}']),
- ('\u{24bc}', ['\u{24d6}', '\u{0}', '\u{0}']), ('\u{24bd}', ['\u{24d7}', '\u{0}', '\u{0}']),
- ('\u{24be}', ['\u{24d8}', '\u{0}', '\u{0}']), ('\u{24bf}', ['\u{24d9}', '\u{0}', '\u{0}']),
- ('\u{24c0}', ['\u{24da}', '\u{0}', '\u{0}']), ('\u{24c1}', ['\u{24db}', '\u{0}', '\u{0}']),
- ('\u{24c2}', ['\u{24dc}', '\u{0}', '\u{0}']), ('\u{24c3}', ['\u{24dd}', '\u{0}', '\u{0}']),
- ('\u{24c4}', ['\u{24de}', '\u{0}', '\u{0}']), ('\u{24c5}', ['\u{24df}', '\u{0}', '\u{0}']),
- ('\u{24c6}', ['\u{24e0}', '\u{0}', '\u{0}']), ('\u{24c7}', ['\u{24e1}', '\u{0}', '\u{0}']),
- ('\u{24c8}', ['\u{24e2}', '\u{0}', '\u{0}']), ('\u{24c9}', ['\u{24e3}', '\u{0}', '\u{0}']),
- ('\u{24ca}', ['\u{24e4}', '\u{0}', '\u{0}']), ('\u{24cb}', ['\u{24e5}', '\u{0}', '\u{0}']),
- ('\u{24cc}', ['\u{24e6}', '\u{0}', '\u{0}']), ('\u{24cd}', ['\u{24e7}', '\u{0}', '\u{0}']),
- ('\u{24ce}', ['\u{24e8}', '\u{0}', '\u{0}']), ('\u{24cf}', ['\u{24e9}', '\u{0}', '\u{0}']),
- ('\u{2c00}', ['\u{2c30}', '\u{0}', '\u{0}']), ('\u{2c01}', ['\u{2c31}', '\u{0}', '\u{0}']),
- ('\u{2c02}', ['\u{2c32}', '\u{0}', '\u{0}']), ('\u{2c03}', ['\u{2c33}', '\u{0}', '\u{0}']),
- ('\u{2c04}', ['\u{2c34}', '\u{0}', '\u{0}']), ('\u{2c05}', ['\u{2c35}', '\u{0}', '\u{0}']),
- ('\u{2c06}', ['\u{2c36}', '\u{0}', '\u{0}']), ('\u{2c07}', ['\u{2c37}', '\u{0}', '\u{0}']),
- ('\u{2c08}', ['\u{2c38}', '\u{0}', '\u{0}']), ('\u{2c09}', ['\u{2c39}', '\u{0}', '\u{0}']),
- ('\u{2c0a}', ['\u{2c3a}', '\u{0}', '\u{0}']), ('\u{2c0b}', ['\u{2c3b}', '\u{0}', '\u{0}']),
- ('\u{2c0c}', ['\u{2c3c}', '\u{0}', '\u{0}']), ('\u{2c0d}', ['\u{2c3d}', '\u{0}', '\u{0}']),
- ('\u{2c0e}', ['\u{2c3e}', '\u{0}', '\u{0}']), ('\u{2c0f}', ['\u{2c3f}', '\u{0}', '\u{0}']),
- ('\u{2c10}', ['\u{2c40}', '\u{0}', '\u{0}']), ('\u{2c11}', ['\u{2c41}', '\u{0}', '\u{0}']),
- ('\u{2c12}', ['\u{2c42}', '\u{0}', '\u{0}']), ('\u{2c13}', ['\u{2c43}', '\u{0}', '\u{0}']),
- ('\u{2c14}', ['\u{2c44}', '\u{0}', '\u{0}']), ('\u{2c15}', ['\u{2c45}', '\u{0}', '\u{0}']),
- ('\u{2c16}', ['\u{2c46}', '\u{0}', '\u{0}']), ('\u{2c17}', ['\u{2c47}', '\u{0}', '\u{0}']),
- ('\u{2c18}', ['\u{2c48}', '\u{0}', '\u{0}']), ('\u{2c19}', ['\u{2c49}', '\u{0}', '\u{0}']),
- ('\u{2c1a}', ['\u{2c4a}', '\u{0}', '\u{0}']), ('\u{2c1b}', ['\u{2c4b}', '\u{0}', '\u{0}']),
- ('\u{2c1c}', ['\u{2c4c}', '\u{0}', '\u{0}']), ('\u{2c1d}', ['\u{2c4d}', '\u{0}', '\u{0}']),
- ('\u{2c1e}', ['\u{2c4e}', '\u{0}', '\u{0}']), ('\u{2c1f}', ['\u{2c4f}', '\u{0}', '\u{0}']),
- ('\u{2c20}', ['\u{2c50}', '\u{0}', '\u{0}']), ('\u{2c21}', ['\u{2c51}', '\u{0}', '\u{0}']),
- ('\u{2c22}', ['\u{2c52}', '\u{0}', '\u{0}']), ('\u{2c23}', ['\u{2c53}', '\u{0}', '\u{0}']),
- ('\u{2c24}', ['\u{2c54}', '\u{0}', '\u{0}']), ('\u{2c25}', ['\u{2c55}', '\u{0}', '\u{0}']),
- ('\u{2c26}', ['\u{2c56}', '\u{0}', '\u{0}']), ('\u{2c27}', ['\u{2c57}', '\u{0}', '\u{0}']),
- ('\u{2c28}', ['\u{2c58}', '\u{0}', '\u{0}']), ('\u{2c29}', ['\u{2c59}', '\u{0}', '\u{0}']),
- ('\u{2c2a}', ['\u{2c5a}', '\u{0}', '\u{0}']), ('\u{2c2b}', ['\u{2c5b}', '\u{0}', '\u{0}']),
- ('\u{2c2c}', ['\u{2c5c}', '\u{0}', '\u{0}']), ('\u{2c2d}', ['\u{2c5d}', '\u{0}', '\u{0}']),
- ('\u{2c2e}', ['\u{2c5e}', '\u{0}', '\u{0}']), ('\u{2c2f}', ['\u{2c5f}', '\u{0}', '\u{0}']),
- ('\u{2c60}', ['\u{2c61}', '\u{0}', '\u{0}']), ('\u{2c62}', ['\u{26b}', '\u{0}', '\u{0}']),
- ('\u{2c63}', ['\u{1d7d}', '\u{0}', '\u{0}']), ('\u{2c64}', ['\u{27d}', '\u{0}', '\u{0}']),
- ('\u{2c67}', ['\u{2c68}', '\u{0}', '\u{0}']), ('\u{2c69}', ['\u{2c6a}', '\u{0}', '\u{0}']),
- ('\u{2c6b}', ['\u{2c6c}', '\u{0}', '\u{0}']), ('\u{2c6d}', ['\u{251}', '\u{0}', '\u{0}']),
- ('\u{2c6e}', ['\u{271}', '\u{0}', '\u{0}']), ('\u{2c6f}', ['\u{250}', '\u{0}', '\u{0}']),
- ('\u{2c70}', ['\u{252}', '\u{0}', '\u{0}']), ('\u{2c72}', ['\u{2c73}', '\u{0}', '\u{0}']),
- ('\u{2c75}', ['\u{2c76}', '\u{0}', '\u{0}']), ('\u{2c7e}', ['\u{23f}', '\u{0}', '\u{0}']),
- ('\u{2c7f}', ['\u{240}', '\u{0}', '\u{0}']), ('\u{2c80}', ['\u{2c81}', '\u{0}', '\u{0}']),
- ('\u{2c82}', ['\u{2c83}', '\u{0}', '\u{0}']), ('\u{2c84}', ['\u{2c85}', '\u{0}', '\u{0}']),
- ('\u{2c86}', ['\u{2c87}', '\u{0}', '\u{0}']), ('\u{2c88}', ['\u{2c89}', '\u{0}', '\u{0}']),
- ('\u{2c8a}', ['\u{2c8b}', '\u{0}', '\u{0}']), ('\u{2c8c}', ['\u{2c8d}', '\u{0}', '\u{0}']),
- ('\u{2c8e}', ['\u{2c8f}', '\u{0}', '\u{0}']), ('\u{2c90}', ['\u{2c91}', '\u{0}', '\u{0}']),
- ('\u{2c92}', ['\u{2c93}', '\u{0}', '\u{0}']), ('\u{2c94}', ['\u{2c95}', '\u{0}', '\u{0}']),
- ('\u{2c96}', ['\u{2c97}', '\u{0}', '\u{0}']), ('\u{2c98}', ['\u{2c99}', '\u{0}', '\u{0}']),
- ('\u{2c9a}', ['\u{2c9b}', '\u{0}', '\u{0}']), ('\u{2c9c}', ['\u{2c9d}', '\u{0}', '\u{0}']),
- ('\u{2c9e}', ['\u{2c9f}', '\u{0}', '\u{0}']), ('\u{2ca0}', ['\u{2ca1}', '\u{0}', '\u{0}']),
- ('\u{2ca2}', ['\u{2ca3}', '\u{0}', '\u{0}']), ('\u{2ca4}', ['\u{2ca5}', '\u{0}', '\u{0}']),
- ('\u{2ca6}', ['\u{2ca7}', '\u{0}', '\u{0}']), ('\u{2ca8}', ['\u{2ca9}', '\u{0}', '\u{0}']),
- ('\u{2caa}', ['\u{2cab}', '\u{0}', '\u{0}']), ('\u{2cac}', ['\u{2cad}', '\u{0}', '\u{0}']),
- ('\u{2cae}', ['\u{2caf}', '\u{0}', '\u{0}']), ('\u{2cb0}', ['\u{2cb1}', '\u{0}', '\u{0}']),
- ('\u{2cb2}', ['\u{2cb3}', '\u{0}', '\u{0}']), ('\u{2cb4}', ['\u{2cb5}', '\u{0}', '\u{0}']),
- ('\u{2cb6}', ['\u{2cb7}', '\u{0}', '\u{0}']), ('\u{2cb8}', ['\u{2cb9}', '\u{0}', '\u{0}']),
- ('\u{2cba}', ['\u{2cbb}', '\u{0}', '\u{0}']), ('\u{2cbc}', ['\u{2cbd}', '\u{0}', '\u{0}']),
- ('\u{2cbe}', ['\u{2cbf}', '\u{0}', '\u{0}']), ('\u{2cc0}', ['\u{2cc1}', '\u{0}', '\u{0}']),
- ('\u{2cc2}', ['\u{2cc3}', '\u{0}', '\u{0}']), ('\u{2cc4}', ['\u{2cc5}', '\u{0}', '\u{0}']),
- ('\u{2cc6}', ['\u{2cc7}', '\u{0}', '\u{0}']), ('\u{2cc8}', ['\u{2cc9}', '\u{0}', '\u{0}']),
- ('\u{2cca}', ['\u{2ccb}', '\u{0}', '\u{0}']), ('\u{2ccc}', ['\u{2ccd}', '\u{0}', '\u{0}']),
- ('\u{2cce}', ['\u{2ccf}', '\u{0}', '\u{0}']), ('\u{2cd0}', ['\u{2cd1}', '\u{0}', '\u{0}']),
- ('\u{2cd2}', ['\u{2cd3}', '\u{0}', '\u{0}']), ('\u{2cd4}', ['\u{2cd5}', '\u{0}', '\u{0}']),
- ('\u{2cd6}', ['\u{2cd7}', '\u{0}', '\u{0}']), ('\u{2cd8}', ['\u{2cd9}', '\u{0}', '\u{0}']),
- ('\u{2cda}', ['\u{2cdb}', '\u{0}', '\u{0}']), ('\u{2cdc}', ['\u{2cdd}', '\u{0}', '\u{0}']),
- ('\u{2cde}', ['\u{2cdf}', '\u{0}', '\u{0}']), ('\u{2ce0}', ['\u{2ce1}', '\u{0}', '\u{0}']),
- ('\u{2ce2}', ['\u{2ce3}', '\u{0}', '\u{0}']), ('\u{2ceb}', ['\u{2cec}', '\u{0}', '\u{0}']),
- ('\u{2ced}', ['\u{2cee}', '\u{0}', '\u{0}']), ('\u{2cf2}', ['\u{2cf3}', '\u{0}', '\u{0}']),
- ('\u{a640}', ['\u{a641}', '\u{0}', '\u{0}']), ('\u{a642}', ['\u{a643}', '\u{0}', '\u{0}']),
- ('\u{a644}', ['\u{a645}', '\u{0}', '\u{0}']), ('\u{a646}', ['\u{a647}', '\u{0}', '\u{0}']),
- ('\u{a648}', ['\u{a649}', '\u{0}', '\u{0}']), ('\u{a64a}', ['\u{a64b}', '\u{0}', '\u{0}']),
- ('\u{a64c}', ['\u{a64d}', '\u{0}', '\u{0}']), ('\u{a64e}', ['\u{a64f}', '\u{0}', '\u{0}']),
- ('\u{a650}', ['\u{a651}', '\u{0}', '\u{0}']), ('\u{a652}', ['\u{a653}', '\u{0}', '\u{0}']),
- ('\u{a654}', ['\u{a655}', '\u{0}', '\u{0}']), ('\u{a656}', ['\u{a657}', '\u{0}', '\u{0}']),
- ('\u{a658}', ['\u{a659}', '\u{0}', '\u{0}']), ('\u{a65a}', ['\u{a65b}', '\u{0}', '\u{0}']),
- ('\u{a65c}', ['\u{a65d}', '\u{0}', '\u{0}']), ('\u{a65e}', ['\u{a65f}', '\u{0}', '\u{0}']),
- ('\u{a660}', ['\u{a661}', '\u{0}', '\u{0}']), ('\u{a662}', ['\u{a663}', '\u{0}', '\u{0}']),
- ('\u{a664}', ['\u{a665}', '\u{0}', '\u{0}']), ('\u{a666}', ['\u{a667}', '\u{0}', '\u{0}']),
- ('\u{a668}', ['\u{a669}', '\u{0}', '\u{0}']), ('\u{a66a}', ['\u{a66b}', '\u{0}', '\u{0}']),
- ('\u{a66c}', ['\u{a66d}', '\u{0}', '\u{0}']), ('\u{a680}', ['\u{a681}', '\u{0}', '\u{0}']),
- ('\u{a682}', ['\u{a683}', '\u{0}', '\u{0}']), ('\u{a684}', ['\u{a685}', '\u{0}', '\u{0}']),
- ('\u{a686}', ['\u{a687}', '\u{0}', '\u{0}']), ('\u{a688}', ['\u{a689}', '\u{0}', '\u{0}']),
- ('\u{a68a}', ['\u{a68b}', '\u{0}', '\u{0}']), ('\u{a68c}', ['\u{a68d}', '\u{0}', '\u{0}']),
- ('\u{a68e}', ['\u{a68f}', '\u{0}', '\u{0}']), ('\u{a690}', ['\u{a691}', '\u{0}', '\u{0}']),
- ('\u{a692}', ['\u{a693}', '\u{0}', '\u{0}']), ('\u{a694}', ['\u{a695}', '\u{0}', '\u{0}']),
- ('\u{a696}', ['\u{a697}', '\u{0}', '\u{0}']), ('\u{a698}', ['\u{a699}', '\u{0}', '\u{0}']),
- ('\u{a69a}', ['\u{a69b}', '\u{0}', '\u{0}']), ('\u{a722}', ['\u{a723}', '\u{0}', '\u{0}']),
- ('\u{a724}', ['\u{a725}', '\u{0}', '\u{0}']), ('\u{a726}', ['\u{a727}', '\u{0}', '\u{0}']),
- ('\u{a728}', ['\u{a729}', '\u{0}', '\u{0}']), ('\u{a72a}', ['\u{a72b}', '\u{0}', '\u{0}']),
- ('\u{a72c}', ['\u{a72d}', '\u{0}', '\u{0}']), ('\u{a72e}', ['\u{a72f}', '\u{0}', '\u{0}']),
- ('\u{a732}', ['\u{a733}', '\u{0}', '\u{0}']), ('\u{a734}', ['\u{a735}', '\u{0}', '\u{0}']),
- ('\u{a736}', ['\u{a737}', '\u{0}', '\u{0}']), ('\u{a738}', ['\u{a739}', '\u{0}', '\u{0}']),
- ('\u{a73a}', ['\u{a73b}', '\u{0}', '\u{0}']), ('\u{a73c}', ['\u{a73d}', '\u{0}', '\u{0}']),
- ('\u{a73e}', ['\u{a73f}', '\u{0}', '\u{0}']), ('\u{a740}', ['\u{a741}', '\u{0}', '\u{0}']),
- ('\u{a742}', ['\u{a743}', '\u{0}', '\u{0}']), ('\u{a744}', ['\u{a745}', '\u{0}', '\u{0}']),
- ('\u{a746}', ['\u{a747}', '\u{0}', '\u{0}']), ('\u{a748}', ['\u{a749}', '\u{0}', '\u{0}']),
- ('\u{a74a}', ['\u{a74b}', '\u{0}', '\u{0}']), ('\u{a74c}', ['\u{a74d}', '\u{0}', '\u{0}']),
- ('\u{a74e}', ['\u{a74f}', '\u{0}', '\u{0}']), ('\u{a750}', ['\u{a751}', '\u{0}', '\u{0}']),
- ('\u{a752}', ['\u{a753}', '\u{0}', '\u{0}']), ('\u{a754}', ['\u{a755}', '\u{0}', '\u{0}']),
- ('\u{a756}', ['\u{a757}', '\u{0}', '\u{0}']), ('\u{a758}', ['\u{a759}', '\u{0}', '\u{0}']),
- ('\u{a75a}', ['\u{a75b}', '\u{0}', '\u{0}']), ('\u{a75c}', ['\u{a75d}', '\u{0}', '\u{0}']),
- ('\u{a75e}', ['\u{a75f}', '\u{0}', '\u{0}']), ('\u{a760}', ['\u{a761}', '\u{0}', '\u{0}']),
- ('\u{a762}', ['\u{a763}', '\u{0}', '\u{0}']), ('\u{a764}', ['\u{a765}', '\u{0}', '\u{0}']),
- ('\u{a766}', ['\u{a767}', '\u{0}', '\u{0}']), ('\u{a768}', ['\u{a769}', '\u{0}', '\u{0}']),
- ('\u{a76a}', ['\u{a76b}', '\u{0}', '\u{0}']), ('\u{a76c}', ['\u{a76d}', '\u{0}', '\u{0}']),
- ('\u{a76e}', ['\u{a76f}', '\u{0}', '\u{0}']), ('\u{a779}', ['\u{a77a}', '\u{0}', '\u{0}']),
- ('\u{a77b}', ['\u{a77c}', '\u{0}', '\u{0}']), ('\u{a77d}', ['\u{1d79}', '\u{0}', '\u{0}']),
- ('\u{a77e}', ['\u{a77f}', '\u{0}', '\u{0}']), ('\u{a780}', ['\u{a781}', '\u{0}', '\u{0}']),
- ('\u{a782}', ['\u{a783}', '\u{0}', '\u{0}']), ('\u{a784}', ['\u{a785}', '\u{0}', '\u{0}']),
- ('\u{a786}', ['\u{a787}', '\u{0}', '\u{0}']), ('\u{a78b}', ['\u{a78c}', '\u{0}', '\u{0}']),
- ('\u{a78d}', ['\u{265}', '\u{0}', '\u{0}']), ('\u{a790}', ['\u{a791}', '\u{0}', '\u{0}']),
- ('\u{a792}', ['\u{a793}', '\u{0}', '\u{0}']), ('\u{a796}', ['\u{a797}', '\u{0}', '\u{0}']),
- ('\u{a798}', ['\u{a799}', '\u{0}', '\u{0}']), ('\u{a79a}', ['\u{a79b}', '\u{0}', '\u{0}']),
- ('\u{a79c}', ['\u{a79d}', '\u{0}', '\u{0}']), ('\u{a79e}', ['\u{a79f}', '\u{0}', '\u{0}']),
- ('\u{a7a0}', ['\u{a7a1}', '\u{0}', '\u{0}']), ('\u{a7a2}', ['\u{a7a3}', '\u{0}', '\u{0}']),
- ('\u{a7a4}', ['\u{a7a5}', '\u{0}', '\u{0}']), ('\u{a7a6}', ['\u{a7a7}', '\u{0}', '\u{0}']),
- ('\u{a7a8}', ['\u{a7a9}', '\u{0}', '\u{0}']), ('\u{a7aa}', ['\u{266}', '\u{0}', '\u{0}']),
- ('\u{a7ab}', ['\u{25c}', '\u{0}', '\u{0}']), ('\u{a7ac}', ['\u{261}', '\u{0}', '\u{0}']),
- ('\u{a7ad}', ['\u{26c}', '\u{0}', '\u{0}']), ('\u{a7ae}', ['\u{26a}', '\u{0}', '\u{0}']),
- ('\u{a7b0}', ['\u{29e}', '\u{0}', '\u{0}']), ('\u{a7b1}', ['\u{287}', '\u{0}', '\u{0}']),
- ('\u{a7b2}', ['\u{29d}', '\u{0}', '\u{0}']), ('\u{a7b3}', ['\u{ab53}', '\u{0}', '\u{0}']),
- ('\u{a7b4}', ['\u{a7b5}', '\u{0}', '\u{0}']), ('\u{a7b6}', ['\u{a7b7}', '\u{0}', '\u{0}']),
- ('\u{a7b8}', ['\u{a7b9}', '\u{0}', '\u{0}']), ('\u{a7ba}', ['\u{a7bb}', '\u{0}', '\u{0}']),
- ('\u{a7bc}', ['\u{a7bd}', '\u{0}', '\u{0}']), ('\u{a7be}', ['\u{a7bf}', '\u{0}', '\u{0}']),
- ('\u{a7c0}', ['\u{a7c1}', '\u{0}', '\u{0}']), ('\u{a7c2}', ['\u{a7c3}', '\u{0}', '\u{0}']),
- ('\u{a7c4}', ['\u{a794}', '\u{0}', '\u{0}']), ('\u{a7c5}', ['\u{282}', '\u{0}', '\u{0}']),
- ('\u{a7c6}', ['\u{1d8e}', '\u{0}', '\u{0}']), ('\u{a7c7}', ['\u{a7c8}', '\u{0}', '\u{0}']),
- ('\u{a7c9}', ['\u{a7ca}', '\u{0}', '\u{0}']), ('\u{a7d0}', ['\u{a7d1}', '\u{0}', '\u{0}']),
- ('\u{a7d6}', ['\u{a7d7}', '\u{0}', '\u{0}']), ('\u{a7d8}', ['\u{a7d9}', '\u{0}', '\u{0}']),
- ('\u{a7f5}', ['\u{a7f6}', '\u{0}', '\u{0}']), ('\u{ff21}', ['\u{ff41}', '\u{0}', '\u{0}']),
- ('\u{ff22}', ['\u{ff42}', '\u{0}', '\u{0}']), ('\u{ff23}', ['\u{ff43}', '\u{0}', '\u{0}']),
- ('\u{ff24}', ['\u{ff44}', '\u{0}', '\u{0}']), ('\u{ff25}', ['\u{ff45}', '\u{0}', '\u{0}']),
- ('\u{ff26}', ['\u{ff46}', '\u{0}', '\u{0}']), ('\u{ff27}', ['\u{ff47}', '\u{0}', '\u{0}']),
- ('\u{ff28}', ['\u{ff48}', '\u{0}', '\u{0}']), ('\u{ff29}', ['\u{ff49}', '\u{0}', '\u{0}']),
- ('\u{ff2a}', ['\u{ff4a}', '\u{0}', '\u{0}']), ('\u{ff2b}', ['\u{ff4b}', '\u{0}', '\u{0}']),
- ('\u{ff2c}', ['\u{ff4c}', '\u{0}', '\u{0}']), ('\u{ff2d}', ['\u{ff4d}', '\u{0}', '\u{0}']),
- ('\u{ff2e}', ['\u{ff4e}', '\u{0}', '\u{0}']), ('\u{ff2f}', ['\u{ff4f}', '\u{0}', '\u{0}']),
- ('\u{ff30}', ['\u{ff50}', '\u{0}', '\u{0}']), ('\u{ff31}', ['\u{ff51}', '\u{0}', '\u{0}']),
- ('\u{ff32}', ['\u{ff52}', '\u{0}', '\u{0}']), ('\u{ff33}', ['\u{ff53}', '\u{0}', '\u{0}']),
- ('\u{ff34}', ['\u{ff54}', '\u{0}', '\u{0}']), ('\u{ff35}', ['\u{ff55}', '\u{0}', '\u{0}']),
- ('\u{ff36}', ['\u{ff56}', '\u{0}', '\u{0}']), ('\u{ff37}', ['\u{ff57}', '\u{0}', '\u{0}']),
- ('\u{ff38}', ['\u{ff58}', '\u{0}', '\u{0}']), ('\u{ff39}', ['\u{ff59}', '\u{0}', '\u{0}']),
- ('\u{ff3a}', ['\u{ff5a}', '\u{0}', '\u{0}']),
- ('\u{10400}', ['\u{10428}', '\u{0}', '\u{0}']),
- ('\u{10401}', ['\u{10429}', '\u{0}', '\u{0}']),
- ('\u{10402}', ['\u{1042a}', '\u{0}', '\u{0}']),
- ('\u{10403}', ['\u{1042b}', '\u{0}', '\u{0}']),
- ('\u{10404}', ['\u{1042c}', '\u{0}', '\u{0}']),
- ('\u{10405}', ['\u{1042d}', '\u{0}', '\u{0}']),
- ('\u{10406}', ['\u{1042e}', '\u{0}', '\u{0}']),
- ('\u{10407}', ['\u{1042f}', '\u{0}', '\u{0}']),
- ('\u{10408}', ['\u{10430}', '\u{0}', '\u{0}']),
- ('\u{10409}', ['\u{10431}', '\u{0}', '\u{0}']),
- ('\u{1040a}', ['\u{10432}', '\u{0}', '\u{0}']),
- ('\u{1040b}', ['\u{10433}', '\u{0}', '\u{0}']),
- ('\u{1040c}', ['\u{10434}', '\u{0}', '\u{0}']),
- ('\u{1040d}', ['\u{10435}', '\u{0}', '\u{0}']),
- ('\u{1040e}', ['\u{10436}', '\u{0}', '\u{0}']),
- ('\u{1040f}', ['\u{10437}', '\u{0}', '\u{0}']),
- ('\u{10410}', ['\u{10438}', '\u{0}', '\u{0}']),
- ('\u{10411}', ['\u{10439}', '\u{0}', '\u{0}']),
- ('\u{10412}', ['\u{1043a}', '\u{0}', '\u{0}']),
- ('\u{10413}', ['\u{1043b}', '\u{0}', '\u{0}']),
- ('\u{10414}', ['\u{1043c}', '\u{0}', '\u{0}']),
- ('\u{10415}', ['\u{1043d}', '\u{0}', '\u{0}']),
- ('\u{10416}', ['\u{1043e}', '\u{0}', '\u{0}']),
- ('\u{10417}', ['\u{1043f}', '\u{0}', '\u{0}']),
- ('\u{10418}', ['\u{10440}', '\u{0}', '\u{0}']),
- ('\u{10419}', ['\u{10441}', '\u{0}', '\u{0}']),
- ('\u{1041a}', ['\u{10442}', '\u{0}', '\u{0}']),
- ('\u{1041b}', ['\u{10443}', '\u{0}', '\u{0}']),
- ('\u{1041c}', ['\u{10444}', '\u{0}', '\u{0}']),
- ('\u{1041d}', ['\u{10445}', '\u{0}', '\u{0}']),
- ('\u{1041e}', ['\u{10446}', '\u{0}', '\u{0}']),
- ('\u{1041f}', ['\u{10447}', '\u{0}', '\u{0}']),
- ('\u{10420}', ['\u{10448}', '\u{0}', '\u{0}']),
- ('\u{10421}', ['\u{10449}', '\u{0}', '\u{0}']),
- ('\u{10422}', ['\u{1044a}', '\u{0}', '\u{0}']),
- ('\u{10423}', ['\u{1044b}', '\u{0}', '\u{0}']),
- ('\u{10424}', ['\u{1044c}', '\u{0}', '\u{0}']),
- ('\u{10425}', ['\u{1044d}', '\u{0}', '\u{0}']),
- ('\u{10426}', ['\u{1044e}', '\u{0}', '\u{0}']),
- ('\u{10427}', ['\u{1044f}', '\u{0}', '\u{0}']),
- ('\u{104b0}', ['\u{104d8}', '\u{0}', '\u{0}']),
- ('\u{104b1}', ['\u{104d9}', '\u{0}', '\u{0}']),
- ('\u{104b2}', ['\u{104da}', '\u{0}', '\u{0}']),
- ('\u{104b3}', ['\u{104db}', '\u{0}', '\u{0}']),
- ('\u{104b4}', ['\u{104dc}', '\u{0}', '\u{0}']),
- ('\u{104b5}', ['\u{104dd}', '\u{0}', '\u{0}']),
- ('\u{104b6}', ['\u{104de}', '\u{0}', '\u{0}']),
- ('\u{104b7}', ['\u{104df}', '\u{0}', '\u{0}']),
- ('\u{104b8}', ['\u{104e0}', '\u{0}', '\u{0}']),
- ('\u{104b9}', ['\u{104e1}', '\u{0}', '\u{0}']),
- ('\u{104ba}', ['\u{104e2}', '\u{0}', '\u{0}']),
- ('\u{104bb}', ['\u{104e3}', '\u{0}', '\u{0}']),
- ('\u{104bc}', ['\u{104e4}', '\u{0}', '\u{0}']),
- ('\u{104bd}', ['\u{104e5}', '\u{0}', '\u{0}']),
- ('\u{104be}', ['\u{104e6}', '\u{0}', '\u{0}']),
- ('\u{104bf}', ['\u{104e7}', '\u{0}', '\u{0}']),
- ('\u{104c0}', ['\u{104e8}', '\u{0}', '\u{0}']),
- ('\u{104c1}', ['\u{104e9}', '\u{0}', '\u{0}']),
- ('\u{104c2}', ['\u{104ea}', '\u{0}', '\u{0}']),
- ('\u{104c3}', ['\u{104eb}', '\u{0}', '\u{0}']),
- ('\u{104c4}', ['\u{104ec}', '\u{0}', '\u{0}']),
- ('\u{104c5}', ['\u{104ed}', '\u{0}', '\u{0}']),
- ('\u{104c6}', ['\u{104ee}', '\u{0}', '\u{0}']),
- ('\u{104c7}', ['\u{104ef}', '\u{0}', '\u{0}']),
- ('\u{104c8}', ['\u{104f0}', '\u{0}', '\u{0}']),
- ('\u{104c9}', ['\u{104f1}', '\u{0}', '\u{0}']),
- ('\u{104ca}', ['\u{104f2}', '\u{0}', '\u{0}']),
- ('\u{104cb}', ['\u{104f3}', '\u{0}', '\u{0}']),
- ('\u{104cc}', ['\u{104f4}', '\u{0}', '\u{0}']),
- ('\u{104cd}', ['\u{104f5}', '\u{0}', '\u{0}']),
- ('\u{104ce}', ['\u{104f6}', '\u{0}', '\u{0}']),
- ('\u{104cf}', ['\u{104f7}', '\u{0}', '\u{0}']),
- ('\u{104d0}', ['\u{104f8}', '\u{0}', '\u{0}']),
- ('\u{104d1}', ['\u{104f9}', '\u{0}', '\u{0}']),
- ('\u{104d2}', ['\u{104fa}', '\u{0}', '\u{0}']),
- ('\u{104d3}', ['\u{104fb}', '\u{0}', '\u{0}']),
- ('\u{10570}', ['\u{10597}', '\u{0}', '\u{0}']),
- ('\u{10571}', ['\u{10598}', '\u{0}', '\u{0}']),
- ('\u{10572}', ['\u{10599}', '\u{0}', '\u{0}']),
- ('\u{10573}', ['\u{1059a}', '\u{0}', '\u{0}']),
- ('\u{10574}', ['\u{1059b}', '\u{0}', '\u{0}']),
- ('\u{10575}', ['\u{1059c}', '\u{0}', '\u{0}']),
- ('\u{10576}', ['\u{1059d}', '\u{0}', '\u{0}']),
- ('\u{10577}', ['\u{1059e}', '\u{0}', '\u{0}']),
- ('\u{10578}', ['\u{1059f}', '\u{0}', '\u{0}']),
- ('\u{10579}', ['\u{105a0}', '\u{0}', '\u{0}']),
- ('\u{1057a}', ['\u{105a1}', '\u{0}', '\u{0}']),
- ('\u{1057c}', ['\u{105a3}', '\u{0}', '\u{0}']),
- ('\u{1057d}', ['\u{105a4}', '\u{0}', '\u{0}']),
- ('\u{1057e}', ['\u{105a5}', '\u{0}', '\u{0}']),
- ('\u{1057f}', ['\u{105a6}', '\u{0}', '\u{0}']),
- ('\u{10580}', ['\u{105a7}', '\u{0}', '\u{0}']),
- ('\u{10581}', ['\u{105a8}', '\u{0}', '\u{0}']),
- ('\u{10582}', ['\u{105a9}', '\u{0}', '\u{0}']),
- ('\u{10583}', ['\u{105aa}', '\u{0}', '\u{0}']),
- ('\u{10584}', ['\u{105ab}', '\u{0}', '\u{0}']),
- ('\u{10585}', ['\u{105ac}', '\u{0}', '\u{0}']),
- ('\u{10586}', ['\u{105ad}', '\u{0}', '\u{0}']),
- ('\u{10587}', ['\u{105ae}', '\u{0}', '\u{0}']),
- ('\u{10588}', ['\u{105af}', '\u{0}', '\u{0}']),
- ('\u{10589}', ['\u{105b0}', '\u{0}', '\u{0}']),
- ('\u{1058a}', ['\u{105b1}', '\u{0}', '\u{0}']),
- ('\u{1058c}', ['\u{105b3}', '\u{0}', '\u{0}']),
- ('\u{1058d}', ['\u{105b4}', '\u{0}', '\u{0}']),
- ('\u{1058e}', ['\u{105b5}', '\u{0}', '\u{0}']),
- ('\u{1058f}', ['\u{105b6}', '\u{0}', '\u{0}']),
- ('\u{10590}', ['\u{105b7}', '\u{0}', '\u{0}']),
- ('\u{10591}', ['\u{105b8}', '\u{0}', '\u{0}']),
- ('\u{10592}', ['\u{105b9}', '\u{0}', '\u{0}']),
- ('\u{10594}', ['\u{105bb}', '\u{0}', '\u{0}']),
- ('\u{10595}', ['\u{105bc}', '\u{0}', '\u{0}']),
- ('\u{10c80}', ['\u{10cc0}', '\u{0}', '\u{0}']),
- ('\u{10c81}', ['\u{10cc1}', '\u{0}', '\u{0}']),
- ('\u{10c82}', ['\u{10cc2}', '\u{0}', '\u{0}']),
- ('\u{10c83}', ['\u{10cc3}', '\u{0}', '\u{0}']),
- ('\u{10c84}', ['\u{10cc4}', '\u{0}', '\u{0}']),
- ('\u{10c85}', ['\u{10cc5}', '\u{0}', '\u{0}']),
- ('\u{10c86}', ['\u{10cc6}', '\u{0}', '\u{0}']),
- ('\u{10c87}', ['\u{10cc7}', '\u{0}', '\u{0}']),
- ('\u{10c88}', ['\u{10cc8}', '\u{0}', '\u{0}']),
- ('\u{10c89}', ['\u{10cc9}', '\u{0}', '\u{0}']),
- ('\u{10c8a}', ['\u{10cca}', '\u{0}', '\u{0}']),
- ('\u{10c8b}', ['\u{10ccb}', '\u{0}', '\u{0}']),
- ('\u{10c8c}', ['\u{10ccc}', '\u{0}', '\u{0}']),
- ('\u{10c8d}', ['\u{10ccd}', '\u{0}', '\u{0}']),
- ('\u{10c8e}', ['\u{10cce}', '\u{0}', '\u{0}']),
- ('\u{10c8f}', ['\u{10ccf}', '\u{0}', '\u{0}']),
- ('\u{10c90}', ['\u{10cd0}', '\u{0}', '\u{0}']),
- ('\u{10c91}', ['\u{10cd1}', '\u{0}', '\u{0}']),
- ('\u{10c92}', ['\u{10cd2}', '\u{0}', '\u{0}']),
- ('\u{10c93}', ['\u{10cd3}', '\u{0}', '\u{0}']),
- ('\u{10c94}', ['\u{10cd4}', '\u{0}', '\u{0}']),
- ('\u{10c95}', ['\u{10cd5}', '\u{0}', '\u{0}']),
- ('\u{10c96}', ['\u{10cd6}', '\u{0}', '\u{0}']),
- ('\u{10c97}', ['\u{10cd7}', '\u{0}', '\u{0}']),
- ('\u{10c98}', ['\u{10cd8}', '\u{0}', '\u{0}']),
- ('\u{10c99}', ['\u{10cd9}', '\u{0}', '\u{0}']),
- ('\u{10c9a}', ['\u{10cda}', '\u{0}', '\u{0}']),
- ('\u{10c9b}', ['\u{10cdb}', '\u{0}', '\u{0}']),
- ('\u{10c9c}', ['\u{10cdc}', '\u{0}', '\u{0}']),
- ('\u{10c9d}', ['\u{10cdd}', '\u{0}', '\u{0}']),
- ('\u{10c9e}', ['\u{10cde}', '\u{0}', '\u{0}']),
- ('\u{10c9f}', ['\u{10cdf}', '\u{0}', '\u{0}']),
- ('\u{10ca0}', ['\u{10ce0}', '\u{0}', '\u{0}']),
- ('\u{10ca1}', ['\u{10ce1}', '\u{0}', '\u{0}']),
- ('\u{10ca2}', ['\u{10ce2}', '\u{0}', '\u{0}']),
- ('\u{10ca3}', ['\u{10ce3}', '\u{0}', '\u{0}']),
- ('\u{10ca4}', ['\u{10ce4}', '\u{0}', '\u{0}']),
- ('\u{10ca5}', ['\u{10ce5}', '\u{0}', '\u{0}']),
- ('\u{10ca6}', ['\u{10ce6}', '\u{0}', '\u{0}']),
- ('\u{10ca7}', ['\u{10ce7}', '\u{0}', '\u{0}']),
- ('\u{10ca8}', ['\u{10ce8}', '\u{0}', '\u{0}']),
- ('\u{10ca9}', ['\u{10ce9}', '\u{0}', '\u{0}']),
- ('\u{10caa}', ['\u{10cea}', '\u{0}', '\u{0}']),
- ('\u{10cab}', ['\u{10ceb}', '\u{0}', '\u{0}']),
- ('\u{10cac}', ['\u{10cec}', '\u{0}', '\u{0}']),
- ('\u{10cad}', ['\u{10ced}', '\u{0}', '\u{0}']),
- ('\u{10cae}', ['\u{10cee}', '\u{0}', '\u{0}']),
- ('\u{10caf}', ['\u{10cef}', '\u{0}', '\u{0}']),
- ('\u{10cb0}', ['\u{10cf0}', '\u{0}', '\u{0}']),
- ('\u{10cb1}', ['\u{10cf1}', '\u{0}', '\u{0}']),
- ('\u{10cb2}', ['\u{10cf2}', '\u{0}', '\u{0}']),
- ('\u{118a0}', ['\u{118c0}', '\u{0}', '\u{0}']),
- ('\u{118a1}', ['\u{118c1}', '\u{0}', '\u{0}']),
- ('\u{118a2}', ['\u{118c2}', '\u{0}', '\u{0}']),
- ('\u{118a3}', ['\u{118c3}', '\u{0}', '\u{0}']),
- ('\u{118a4}', ['\u{118c4}', '\u{0}', '\u{0}']),
- ('\u{118a5}', ['\u{118c5}', '\u{0}', '\u{0}']),
- ('\u{118a6}', ['\u{118c6}', '\u{0}', '\u{0}']),
- ('\u{118a7}', ['\u{118c7}', '\u{0}', '\u{0}']),
- ('\u{118a8}', ['\u{118c8}', '\u{0}', '\u{0}']),
- ('\u{118a9}', ['\u{118c9}', '\u{0}', '\u{0}']),
- ('\u{118aa}', ['\u{118ca}', '\u{0}', '\u{0}']),
- ('\u{118ab}', ['\u{118cb}', '\u{0}', '\u{0}']),
- ('\u{118ac}', ['\u{118cc}', '\u{0}', '\u{0}']),
- ('\u{118ad}', ['\u{118cd}', '\u{0}', '\u{0}']),
- ('\u{118ae}', ['\u{118ce}', '\u{0}', '\u{0}']),
- ('\u{118af}', ['\u{118cf}', '\u{0}', '\u{0}']),
- ('\u{118b0}', ['\u{118d0}', '\u{0}', '\u{0}']),
- ('\u{118b1}', ['\u{118d1}', '\u{0}', '\u{0}']),
- ('\u{118b2}', ['\u{118d2}', '\u{0}', '\u{0}']),
- ('\u{118b3}', ['\u{118d3}', '\u{0}', '\u{0}']),
- ('\u{118b4}', ['\u{118d4}', '\u{0}', '\u{0}']),
- ('\u{118b5}', ['\u{118d5}', '\u{0}', '\u{0}']),
- ('\u{118b6}', ['\u{118d6}', '\u{0}', '\u{0}']),
- ('\u{118b7}', ['\u{118d7}', '\u{0}', '\u{0}']),
- ('\u{118b8}', ['\u{118d8}', '\u{0}', '\u{0}']),
- ('\u{118b9}', ['\u{118d9}', '\u{0}', '\u{0}']),
- ('\u{118ba}', ['\u{118da}', '\u{0}', '\u{0}']),
- ('\u{118bb}', ['\u{118db}', '\u{0}', '\u{0}']),
- ('\u{118bc}', ['\u{118dc}', '\u{0}', '\u{0}']),
- ('\u{118bd}', ['\u{118dd}', '\u{0}', '\u{0}']),
- ('\u{118be}', ['\u{118de}', '\u{0}', '\u{0}']),
- ('\u{118bf}', ['\u{118df}', '\u{0}', '\u{0}']),
- ('\u{16e40}', ['\u{16e60}', '\u{0}', '\u{0}']),
- ('\u{16e41}', ['\u{16e61}', '\u{0}', '\u{0}']),
- ('\u{16e42}', ['\u{16e62}', '\u{0}', '\u{0}']),
- ('\u{16e43}', ['\u{16e63}', '\u{0}', '\u{0}']),
- ('\u{16e44}', ['\u{16e64}', '\u{0}', '\u{0}']),
- ('\u{16e45}', ['\u{16e65}', '\u{0}', '\u{0}']),
- ('\u{16e46}', ['\u{16e66}', '\u{0}', '\u{0}']),
- ('\u{16e47}', ['\u{16e67}', '\u{0}', '\u{0}']),
- ('\u{16e48}', ['\u{16e68}', '\u{0}', '\u{0}']),
- ('\u{16e49}', ['\u{16e69}', '\u{0}', '\u{0}']),
- ('\u{16e4a}', ['\u{16e6a}', '\u{0}', '\u{0}']),
- ('\u{16e4b}', ['\u{16e6b}', '\u{0}', '\u{0}']),
- ('\u{16e4c}', ['\u{16e6c}', '\u{0}', '\u{0}']),
- ('\u{16e4d}', ['\u{16e6d}', '\u{0}', '\u{0}']),
- ('\u{16e4e}', ['\u{16e6e}', '\u{0}', '\u{0}']),
- ('\u{16e4f}', ['\u{16e6f}', '\u{0}', '\u{0}']),
- ('\u{16e50}', ['\u{16e70}', '\u{0}', '\u{0}']),
- ('\u{16e51}', ['\u{16e71}', '\u{0}', '\u{0}']),
- ('\u{16e52}', ['\u{16e72}', '\u{0}', '\u{0}']),
- ('\u{16e53}', ['\u{16e73}', '\u{0}', '\u{0}']),
- ('\u{16e54}', ['\u{16e74}', '\u{0}', '\u{0}']),
- ('\u{16e55}', ['\u{16e75}', '\u{0}', '\u{0}']),
- ('\u{16e56}', ['\u{16e76}', '\u{0}', '\u{0}']),
- ('\u{16e57}', ['\u{16e77}', '\u{0}', '\u{0}']),
- ('\u{16e58}', ['\u{16e78}', '\u{0}', '\u{0}']),
- ('\u{16e59}', ['\u{16e79}', '\u{0}', '\u{0}']),
- ('\u{16e5a}', ['\u{16e7a}', '\u{0}', '\u{0}']),
- ('\u{16e5b}', ['\u{16e7b}', '\u{0}', '\u{0}']),
- ('\u{16e5c}', ['\u{16e7c}', '\u{0}', '\u{0}']),
- ('\u{16e5d}', ['\u{16e7d}', '\u{0}', '\u{0}']),
- ('\u{16e5e}', ['\u{16e7e}', '\u{0}', '\u{0}']),
- ('\u{16e5f}', ['\u{16e7f}', '\u{0}', '\u{0}']),
- ('\u{1e900}', ['\u{1e922}', '\u{0}', '\u{0}']),
- ('\u{1e901}', ['\u{1e923}', '\u{0}', '\u{0}']),
- ('\u{1e902}', ['\u{1e924}', '\u{0}', '\u{0}']),
- ('\u{1e903}', ['\u{1e925}', '\u{0}', '\u{0}']),
- ('\u{1e904}', ['\u{1e926}', '\u{0}', '\u{0}']),
- ('\u{1e905}', ['\u{1e927}', '\u{0}', '\u{0}']),
- ('\u{1e906}', ['\u{1e928}', '\u{0}', '\u{0}']),
- ('\u{1e907}', ['\u{1e929}', '\u{0}', '\u{0}']),
- ('\u{1e908}', ['\u{1e92a}', '\u{0}', '\u{0}']),
- ('\u{1e909}', ['\u{1e92b}', '\u{0}', '\u{0}']),
- ('\u{1e90a}', ['\u{1e92c}', '\u{0}', '\u{0}']),
- ('\u{1e90b}', ['\u{1e92d}', '\u{0}', '\u{0}']),
- ('\u{1e90c}', ['\u{1e92e}', '\u{0}', '\u{0}']),
- ('\u{1e90d}', ['\u{1e92f}', '\u{0}', '\u{0}']),
- ('\u{1e90e}', ['\u{1e930}', '\u{0}', '\u{0}']),
- ('\u{1e90f}', ['\u{1e931}', '\u{0}', '\u{0}']),
- ('\u{1e910}', ['\u{1e932}', '\u{0}', '\u{0}']),
- ('\u{1e911}', ['\u{1e933}', '\u{0}', '\u{0}']),
- ('\u{1e912}', ['\u{1e934}', '\u{0}', '\u{0}']),
- ('\u{1e913}', ['\u{1e935}', '\u{0}', '\u{0}']),
- ('\u{1e914}', ['\u{1e936}', '\u{0}', '\u{0}']),
- ('\u{1e915}', ['\u{1e937}', '\u{0}', '\u{0}']),
- ('\u{1e916}', ['\u{1e938}', '\u{0}', '\u{0}']),
- ('\u{1e917}', ['\u{1e939}', '\u{0}', '\u{0}']),
- ('\u{1e918}', ['\u{1e93a}', '\u{0}', '\u{0}']),
- ('\u{1e919}', ['\u{1e93b}', '\u{0}', '\u{0}']),
- ('\u{1e91a}', ['\u{1e93c}', '\u{0}', '\u{0}']),
- ('\u{1e91b}', ['\u{1e93d}', '\u{0}', '\u{0}']),
- ('\u{1e91c}', ['\u{1e93e}', '\u{0}', '\u{0}']),
- ('\u{1e91d}', ['\u{1e93f}', '\u{0}', '\u{0}']),
- ('\u{1e91e}', ['\u{1e940}', '\u{0}', '\u{0}']),
- ('\u{1e91f}', ['\u{1e941}', '\u{0}', '\u{0}']),
- ('\u{1e920}', ['\u{1e942}', '\u{0}', '\u{0}']),
- ('\u{1e921}', ['\u{1e943}', '\u{0}', '\u{0}']),
+ static LOWERCASE_TABLE: &[(char, u32)] = &[
+ ('\u{c0}', 224), ('\u{c1}', 225), ('\u{c2}', 226), ('\u{c3}', 227), ('\u{c4}', 228),
+ ('\u{c5}', 229), ('\u{c6}', 230), ('\u{c7}', 231), ('\u{c8}', 232), ('\u{c9}', 233),
+ ('\u{ca}', 234), ('\u{cb}', 235), ('\u{cc}', 236), ('\u{cd}', 237), ('\u{ce}', 238),
+ ('\u{cf}', 239), ('\u{d0}', 240), ('\u{d1}', 241), ('\u{d2}', 242), ('\u{d3}', 243),
+ ('\u{d4}', 244), ('\u{d5}', 245), ('\u{d6}', 246), ('\u{d8}', 248), ('\u{d9}', 249),
+ ('\u{da}', 250), ('\u{db}', 251), ('\u{dc}', 252), ('\u{dd}', 253), ('\u{de}', 254),
+ ('\u{100}', 257), ('\u{102}', 259), ('\u{104}', 261), ('\u{106}', 263), ('\u{108}', 265),
+ ('\u{10a}', 267), ('\u{10c}', 269), ('\u{10e}', 271), ('\u{110}', 273), ('\u{112}', 275),
+ ('\u{114}', 277), ('\u{116}', 279), ('\u{118}', 281), ('\u{11a}', 283), ('\u{11c}', 285),
+ ('\u{11e}', 287), ('\u{120}', 289), ('\u{122}', 291), ('\u{124}', 293), ('\u{126}', 295),
+ ('\u{128}', 297), ('\u{12a}', 299), ('\u{12c}', 301), ('\u{12e}', 303),
+ ('\u{130}', 4194304), ('\u{132}', 307), ('\u{134}', 309), ('\u{136}', 311),
+ ('\u{139}', 314), ('\u{13b}', 316), ('\u{13d}', 318), ('\u{13f}', 320), ('\u{141}', 322),
+ ('\u{143}', 324), ('\u{145}', 326), ('\u{147}', 328), ('\u{14a}', 331), ('\u{14c}', 333),
+ ('\u{14e}', 335), ('\u{150}', 337), ('\u{152}', 339), ('\u{154}', 341), ('\u{156}', 343),
+ ('\u{158}', 345), ('\u{15a}', 347), ('\u{15c}', 349), ('\u{15e}', 351), ('\u{160}', 353),
+ ('\u{162}', 355), ('\u{164}', 357), ('\u{166}', 359), ('\u{168}', 361), ('\u{16a}', 363),
+ ('\u{16c}', 365), ('\u{16e}', 367), ('\u{170}', 369), ('\u{172}', 371), ('\u{174}', 373),
+ ('\u{176}', 375), ('\u{178}', 255), ('\u{179}', 378), ('\u{17b}', 380), ('\u{17d}', 382),
+ ('\u{181}', 595), ('\u{182}', 387), ('\u{184}', 389), ('\u{186}', 596), ('\u{187}', 392),
+ ('\u{189}', 598), ('\u{18a}', 599), ('\u{18b}', 396), ('\u{18e}', 477), ('\u{18f}', 601),
+ ('\u{190}', 603), ('\u{191}', 402), ('\u{193}', 608), ('\u{194}', 611), ('\u{196}', 617),
+ ('\u{197}', 616), ('\u{198}', 409), ('\u{19c}', 623), ('\u{19d}', 626), ('\u{19f}', 629),
+ ('\u{1a0}', 417), ('\u{1a2}', 419), ('\u{1a4}', 421), ('\u{1a6}', 640), ('\u{1a7}', 424),
+ ('\u{1a9}', 643), ('\u{1ac}', 429), ('\u{1ae}', 648), ('\u{1af}', 432), ('\u{1b1}', 650),
+ ('\u{1b2}', 651), ('\u{1b3}', 436), ('\u{1b5}', 438), ('\u{1b7}', 658), ('\u{1b8}', 441),
+ ('\u{1bc}', 445), ('\u{1c4}', 454), ('\u{1c5}', 454), ('\u{1c7}', 457), ('\u{1c8}', 457),
+ ('\u{1ca}', 460), ('\u{1cb}', 460), ('\u{1cd}', 462), ('\u{1cf}', 464), ('\u{1d1}', 466),
+ ('\u{1d3}', 468), ('\u{1d5}', 470), ('\u{1d7}', 472), ('\u{1d9}', 474), ('\u{1db}', 476),
+ ('\u{1de}', 479), ('\u{1e0}', 481), ('\u{1e2}', 483), ('\u{1e4}', 485), ('\u{1e6}', 487),
+ ('\u{1e8}', 489), ('\u{1ea}', 491), ('\u{1ec}', 493), ('\u{1ee}', 495), ('\u{1f1}', 499),
+ ('\u{1f2}', 499), ('\u{1f4}', 501), ('\u{1f6}', 405), ('\u{1f7}', 447), ('\u{1f8}', 505),
+ ('\u{1fa}', 507), ('\u{1fc}', 509), ('\u{1fe}', 511), ('\u{200}', 513), ('\u{202}', 515),
+ ('\u{204}', 517), ('\u{206}', 519), ('\u{208}', 521), ('\u{20a}', 523), ('\u{20c}', 525),
+ ('\u{20e}', 527), ('\u{210}', 529), ('\u{212}', 531), ('\u{214}', 533), ('\u{216}', 535),
+ ('\u{218}', 537), ('\u{21a}', 539), ('\u{21c}', 541), ('\u{21e}', 543), ('\u{220}', 414),
+ ('\u{222}', 547), ('\u{224}', 549), ('\u{226}', 551), ('\u{228}', 553), ('\u{22a}', 555),
+ ('\u{22c}', 557), ('\u{22e}', 559), ('\u{230}', 561), ('\u{232}', 563), ('\u{23a}', 11365),
+ ('\u{23b}', 572), ('\u{23d}', 410), ('\u{23e}', 11366), ('\u{241}', 578), ('\u{243}', 384),
+ ('\u{244}', 649), ('\u{245}', 652), ('\u{246}', 583), ('\u{248}', 585), ('\u{24a}', 587),
+ ('\u{24c}', 589), ('\u{24e}', 591), ('\u{370}', 881), ('\u{372}', 883), ('\u{376}', 887),
+ ('\u{37f}', 1011), ('\u{386}', 940), ('\u{388}', 941), ('\u{389}', 942), ('\u{38a}', 943),
+ ('\u{38c}', 972), ('\u{38e}', 973), ('\u{38f}', 974), ('\u{391}', 945), ('\u{392}', 946),
+ ('\u{393}', 947), ('\u{394}', 948), ('\u{395}', 949), ('\u{396}', 950), ('\u{397}', 951),
+ ('\u{398}', 952), ('\u{399}', 953), ('\u{39a}', 954), ('\u{39b}', 955), ('\u{39c}', 956),
+ ('\u{39d}', 957), ('\u{39e}', 958), ('\u{39f}', 959), ('\u{3a0}', 960), ('\u{3a1}', 961),
+ ('\u{3a3}', 963), ('\u{3a4}', 964), ('\u{3a5}', 965), ('\u{3a6}', 966), ('\u{3a7}', 967),
+ ('\u{3a8}', 968), ('\u{3a9}', 969), ('\u{3aa}', 970), ('\u{3ab}', 971), ('\u{3cf}', 983),
+ ('\u{3d8}', 985), ('\u{3da}', 987), ('\u{3dc}', 989), ('\u{3de}', 991), ('\u{3e0}', 993),
+ ('\u{3e2}', 995), ('\u{3e4}', 997), ('\u{3e6}', 999), ('\u{3e8}', 1001), ('\u{3ea}', 1003),
+ ('\u{3ec}', 1005), ('\u{3ee}', 1007), ('\u{3f4}', 952), ('\u{3f7}', 1016),
+ ('\u{3f9}', 1010), ('\u{3fa}', 1019), ('\u{3fd}', 891), ('\u{3fe}', 892), ('\u{3ff}', 893),
+ ('\u{400}', 1104), ('\u{401}', 1105), ('\u{402}', 1106), ('\u{403}', 1107),
+ ('\u{404}', 1108), ('\u{405}', 1109), ('\u{406}', 1110), ('\u{407}', 1111),
+ ('\u{408}', 1112), ('\u{409}', 1113), ('\u{40a}', 1114), ('\u{40b}', 1115),
+ ('\u{40c}', 1116), ('\u{40d}', 1117), ('\u{40e}', 1118), ('\u{40f}', 1119),
+ ('\u{410}', 1072), ('\u{411}', 1073), ('\u{412}', 1074), ('\u{413}', 1075),
+ ('\u{414}', 1076), ('\u{415}', 1077), ('\u{416}', 1078), ('\u{417}', 1079),
+ ('\u{418}', 1080), ('\u{419}', 1081), ('\u{41a}', 1082), ('\u{41b}', 1083),
+ ('\u{41c}', 1084), ('\u{41d}', 1085), ('\u{41e}', 1086), ('\u{41f}', 1087),
+ ('\u{420}', 1088), ('\u{421}', 1089), ('\u{422}', 1090), ('\u{423}', 1091),
+ ('\u{424}', 1092), ('\u{425}', 1093), ('\u{426}', 1094), ('\u{427}', 1095),
+ ('\u{428}', 1096), ('\u{429}', 1097), ('\u{42a}', 1098), ('\u{42b}', 1099),
+ ('\u{42c}', 1100), ('\u{42d}', 1101), ('\u{42e}', 1102), ('\u{42f}', 1103),
+ ('\u{460}', 1121), ('\u{462}', 1123), ('\u{464}', 1125), ('\u{466}', 1127),
+ ('\u{468}', 1129), ('\u{46a}', 1131), ('\u{46c}', 1133), ('\u{46e}', 1135),
+ ('\u{470}', 1137), ('\u{472}', 1139), ('\u{474}', 1141), ('\u{476}', 1143),
+ ('\u{478}', 1145), ('\u{47a}', 1147), ('\u{47c}', 1149), ('\u{47e}', 1151),
+ ('\u{480}', 1153), ('\u{48a}', 1163), ('\u{48c}', 1165), ('\u{48e}', 1167),
+ ('\u{490}', 1169), ('\u{492}', 1171), ('\u{494}', 1173), ('\u{496}', 1175),
+ ('\u{498}', 1177), ('\u{49a}', 1179), ('\u{49c}', 1181), ('\u{49e}', 1183),
+ ('\u{4a0}', 1185), ('\u{4a2}', 1187), ('\u{4a4}', 1189), ('\u{4a6}', 1191),
+ ('\u{4a8}', 1193), ('\u{4aa}', 1195), ('\u{4ac}', 1197), ('\u{4ae}', 1199),
+ ('\u{4b0}', 1201), ('\u{4b2}', 1203), ('\u{4b4}', 1205), ('\u{4b6}', 1207),
+ ('\u{4b8}', 1209), ('\u{4ba}', 1211), ('\u{4bc}', 1213), ('\u{4be}', 1215),
+ ('\u{4c0}', 1231), ('\u{4c1}', 1218), ('\u{4c3}', 1220), ('\u{4c5}', 1222),
+ ('\u{4c7}', 1224), ('\u{4c9}', 1226), ('\u{4cb}', 1228), ('\u{4cd}', 1230),
+ ('\u{4d0}', 1233), ('\u{4d2}', 1235), ('\u{4d4}', 1237), ('\u{4d6}', 1239),
+ ('\u{4d8}', 1241), ('\u{4da}', 1243), ('\u{4dc}', 1245), ('\u{4de}', 1247),
+ ('\u{4e0}', 1249), ('\u{4e2}', 1251), ('\u{4e4}', 1253), ('\u{4e6}', 1255),
+ ('\u{4e8}', 1257), ('\u{4ea}', 1259), ('\u{4ec}', 1261), ('\u{4ee}', 1263),
+ ('\u{4f0}', 1265), ('\u{4f2}', 1267), ('\u{4f4}', 1269), ('\u{4f6}', 1271),
+ ('\u{4f8}', 1273), ('\u{4fa}', 1275), ('\u{4fc}', 1277), ('\u{4fe}', 1279),
+ ('\u{500}', 1281), ('\u{502}', 1283), ('\u{504}', 1285), ('\u{506}', 1287),
+ ('\u{508}', 1289), ('\u{50a}', 1291), ('\u{50c}', 1293), ('\u{50e}', 1295),
+ ('\u{510}', 1297), ('\u{512}', 1299), ('\u{514}', 1301), ('\u{516}', 1303),
+ ('\u{518}', 1305), ('\u{51a}', 1307), ('\u{51c}', 1309), ('\u{51e}', 1311),
+ ('\u{520}', 1313), ('\u{522}', 1315), ('\u{524}', 1317), ('\u{526}', 1319),
+ ('\u{528}', 1321), ('\u{52a}', 1323), ('\u{52c}', 1325), ('\u{52e}', 1327),
+ ('\u{531}', 1377), ('\u{532}', 1378), ('\u{533}', 1379), ('\u{534}', 1380),
+ ('\u{535}', 1381), ('\u{536}', 1382), ('\u{537}', 1383), ('\u{538}', 1384),
+ ('\u{539}', 1385), ('\u{53a}', 1386), ('\u{53b}', 1387), ('\u{53c}', 1388),
+ ('\u{53d}', 1389), ('\u{53e}', 1390), ('\u{53f}', 1391), ('\u{540}', 1392),
+ ('\u{541}', 1393), ('\u{542}', 1394), ('\u{543}', 1395), ('\u{544}', 1396),
+ ('\u{545}', 1397), ('\u{546}', 1398), ('\u{547}', 1399), ('\u{548}', 1400),
+ ('\u{549}', 1401), ('\u{54a}', 1402), ('\u{54b}', 1403), ('\u{54c}', 1404),
+ ('\u{54d}', 1405), ('\u{54e}', 1406), ('\u{54f}', 1407), ('\u{550}', 1408),
+ ('\u{551}', 1409), ('\u{552}', 1410), ('\u{553}', 1411), ('\u{554}', 1412),
+ ('\u{555}', 1413), ('\u{556}', 1414), ('\u{10a0}', 11520), ('\u{10a1}', 11521),
+ ('\u{10a2}', 11522), ('\u{10a3}', 11523), ('\u{10a4}', 11524), ('\u{10a5}', 11525),
+ ('\u{10a6}', 11526), ('\u{10a7}', 11527), ('\u{10a8}', 11528), ('\u{10a9}', 11529),
+ ('\u{10aa}', 11530), ('\u{10ab}', 11531), ('\u{10ac}', 11532), ('\u{10ad}', 11533),
+ ('\u{10ae}', 11534), ('\u{10af}', 11535), ('\u{10b0}', 11536), ('\u{10b1}', 11537),
+ ('\u{10b2}', 11538), ('\u{10b3}', 11539), ('\u{10b4}', 11540), ('\u{10b5}', 11541),
+ ('\u{10b6}', 11542), ('\u{10b7}', 11543), ('\u{10b8}', 11544), ('\u{10b9}', 11545),
+ ('\u{10ba}', 11546), ('\u{10bb}', 11547), ('\u{10bc}', 11548), ('\u{10bd}', 11549),
+ ('\u{10be}', 11550), ('\u{10bf}', 11551), ('\u{10c0}', 11552), ('\u{10c1}', 11553),
+ ('\u{10c2}', 11554), ('\u{10c3}', 11555), ('\u{10c4}', 11556), ('\u{10c5}', 11557),
+ ('\u{10c7}', 11559), ('\u{10cd}', 11565), ('\u{13a0}', 43888), ('\u{13a1}', 43889),
+ ('\u{13a2}', 43890), ('\u{13a3}', 43891), ('\u{13a4}', 43892), ('\u{13a5}', 43893),
+ ('\u{13a6}', 43894), ('\u{13a7}', 43895), ('\u{13a8}', 43896), ('\u{13a9}', 43897),
+ ('\u{13aa}', 43898), ('\u{13ab}', 43899), ('\u{13ac}', 43900), ('\u{13ad}', 43901),
+ ('\u{13ae}', 43902), ('\u{13af}', 43903), ('\u{13b0}', 43904), ('\u{13b1}', 43905),
+ ('\u{13b2}', 43906), ('\u{13b3}', 43907), ('\u{13b4}', 43908), ('\u{13b5}', 43909),
+ ('\u{13b6}', 43910), ('\u{13b7}', 43911), ('\u{13b8}', 43912), ('\u{13b9}', 43913),
+ ('\u{13ba}', 43914), ('\u{13bb}', 43915), ('\u{13bc}', 43916), ('\u{13bd}', 43917),
+ ('\u{13be}', 43918), ('\u{13bf}', 43919), ('\u{13c0}', 43920), ('\u{13c1}', 43921),
+ ('\u{13c2}', 43922), ('\u{13c3}', 43923), ('\u{13c4}', 43924), ('\u{13c5}', 43925),
+ ('\u{13c6}', 43926), ('\u{13c7}', 43927), ('\u{13c8}', 43928), ('\u{13c9}', 43929),
+ ('\u{13ca}', 43930), ('\u{13cb}', 43931), ('\u{13cc}', 43932), ('\u{13cd}', 43933),
+ ('\u{13ce}', 43934), ('\u{13cf}', 43935), ('\u{13d0}', 43936), ('\u{13d1}', 43937),
+ ('\u{13d2}', 43938), ('\u{13d3}', 43939), ('\u{13d4}', 43940), ('\u{13d5}', 43941),
+ ('\u{13d6}', 43942), ('\u{13d7}', 43943), ('\u{13d8}', 43944), ('\u{13d9}', 43945),
+ ('\u{13da}', 43946), ('\u{13db}', 43947), ('\u{13dc}', 43948), ('\u{13dd}', 43949),
+ ('\u{13de}', 43950), ('\u{13df}', 43951), ('\u{13e0}', 43952), ('\u{13e1}', 43953),
+ ('\u{13e2}', 43954), ('\u{13e3}', 43955), ('\u{13e4}', 43956), ('\u{13e5}', 43957),
+ ('\u{13e6}', 43958), ('\u{13e7}', 43959), ('\u{13e8}', 43960), ('\u{13e9}', 43961),
+ ('\u{13ea}', 43962), ('\u{13eb}', 43963), ('\u{13ec}', 43964), ('\u{13ed}', 43965),
+ ('\u{13ee}', 43966), ('\u{13ef}', 43967), ('\u{13f0}', 5112), ('\u{13f1}', 5113),
+ ('\u{13f2}', 5114), ('\u{13f3}', 5115), ('\u{13f4}', 5116), ('\u{13f5}', 5117),
+ ('\u{1c90}', 4304), ('\u{1c91}', 4305), ('\u{1c92}', 4306), ('\u{1c93}', 4307),
+ ('\u{1c94}', 4308), ('\u{1c95}', 4309), ('\u{1c96}', 4310), ('\u{1c97}', 4311),
+ ('\u{1c98}', 4312), ('\u{1c99}', 4313), ('\u{1c9a}', 4314), ('\u{1c9b}', 4315),
+ ('\u{1c9c}', 4316), ('\u{1c9d}', 4317), ('\u{1c9e}', 4318), ('\u{1c9f}', 4319),
+ ('\u{1ca0}', 4320), ('\u{1ca1}', 4321), ('\u{1ca2}', 4322), ('\u{1ca3}', 4323),
+ ('\u{1ca4}', 4324), ('\u{1ca5}', 4325), ('\u{1ca6}', 4326), ('\u{1ca7}', 4327),
+ ('\u{1ca8}', 4328), ('\u{1ca9}', 4329), ('\u{1caa}', 4330), ('\u{1cab}', 4331),
+ ('\u{1cac}', 4332), ('\u{1cad}', 4333), ('\u{1cae}', 4334), ('\u{1caf}', 4335),
+ ('\u{1cb0}', 4336), ('\u{1cb1}', 4337), ('\u{1cb2}', 4338), ('\u{1cb3}', 4339),
+ ('\u{1cb4}', 4340), ('\u{1cb5}', 4341), ('\u{1cb6}', 4342), ('\u{1cb7}', 4343),
+ ('\u{1cb8}', 4344), ('\u{1cb9}', 4345), ('\u{1cba}', 4346), ('\u{1cbd}', 4349),
+ ('\u{1cbe}', 4350), ('\u{1cbf}', 4351), ('\u{1e00}', 7681), ('\u{1e02}', 7683),
+ ('\u{1e04}', 7685), ('\u{1e06}', 7687), ('\u{1e08}', 7689), ('\u{1e0a}', 7691),
+ ('\u{1e0c}', 7693), ('\u{1e0e}', 7695), ('\u{1e10}', 7697), ('\u{1e12}', 7699),
+ ('\u{1e14}', 7701), ('\u{1e16}', 7703), ('\u{1e18}', 7705), ('\u{1e1a}', 7707),
+ ('\u{1e1c}', 7709), ('\u{1e1e}', 7711), ('\u{1e20}', 7713), ('\u{1e22}', 7715),
+ ('\u{1e24}', 7717), ('\u{1e26}', 7719), ('\u{1e28}', 7721), ('\u{1e2a}', 7723),
+ ('\u{1e2c}', 7725), ('\u{1e2e}', 7727), ('\u{1e30}', 7729), ('\u{1e32}', 7731),
+ ('\u{1e34}', 7733), ('\u{1e36}', 7735), ('\u{1e38}', 7737), ('\u{1e3a}', 7739),
+ ('\u{1e3c}', 7741), ('\u{1e3e}', 7743), ('\u{1e40}', 7745), ('\u{1e42}', 7747),
+ ('\u{1e44}', 7749), ('\u{1e46}', 7751), ('\u{1e48}', 7753), ('\u{1e4a}', 7755),
+ ('\u{1e4c}', 7757), ('\u{1e4e}', 7759), ('\u{1e50}', 7761), ('\u{1e52}', 7763),
+ ('\u{1e54}', 7765), ('\u{1e56}', 7767), ('\u{1e58}', 7769), ('\u{1e5a}', 7771),
+ ('\u{1e5c}', 7773), ('\u{1e5e}', 7775), ('\u{1e60}', 7777), ('\u{1e62}', 7779),
+ ('\u{1e64}', 7781), ('\u{1e66}', 7783), ('\u{1e68}', 7785), ('\u{1e6a}', 7787),
+ ('\u{1e6c}', 7789), ('\u{1e6e}', 7791), ('\u{1e70}', 7793), ('\u{1e72}', 7795),
+ ('\u{1e74}', 7797), ('\u{1e76}', 7799), ('\u{1e78}', 7801), ('\u{1e7a}', 7803),
+ ('\u{1e7c}', 7805), ('\u{1e7e}', 7807), ('\u{1e80}', 7809), ('\u{1e82}', 7811),
+ ('\u{1e84}', 7813), ('\u{1e86}', 7815), ('\u{1e88}', 7817), ('\u{1e8a}', 7819),
+ ('\u{1e8c}', 7821), ('\u{1e8e}', 7823), ('\u{1e90}', 7825), ('\u{1e92}', 7827),
+ ('\u{1e94}', 7829), ('\u{1e9e}', 223), ('\u{1ea0}', 7841), ('\u{1ea2}', 7843),
+ ('\u{1ea4}', 7845), ('\u{1ea6}', 7847), ('\u{1ea8}', 7849), ('\u{1eaa}', 7851),
+ ('\u{1eac}', 7853), ('\u{1eae}', 7855), ('\u{1eb0}', 7857), ('\u{1eb2}', 7859),
+ ('\u{1eb4}', 7861), ('\u{1eb6}', 7863), ('\u{1eb8}', 7865), ('\u{1eba}', 7867),
+ ('\u{1ebc}', 7869), ('\u{1ebe}', 7871), ('\u{1ec0}', 7873), ('\u{1ec2}', 7875),
+ ('\u{1ec4}', 7877), ('\u{1ec6}', 7879), ('\u{1ec8}', 7881), ('\u{1eca}', 7883),
+ ('\u{1ecc}', 7885), ('\u{1ece}', 7887), ('\u{1ed0}', 7889), ('\u{1ed2}', 7891),
+ ('\u{1ed4}', 7893), ('\u{1ed6}', 7895), ('\u{1ed8}', 7897), ('\u{1eda}', 7899),
+ ('\u{1edc}', 7901), ('\u{1ede}', 7903), ('\u{1ee0}', 7905), ('\u{1ee2}', 7907),
+ ('\u{1ee4}', 7909), ('\u{1ee6}', 7911), ('\u{1ee8}', 7913), ('\u{1eea}', 7915),
+ ('\u{1eec}', 7917), ('\u{1eee}', 7919), ('\u{1ef0}', 7921), ('\u{1ef2}', 7923),
+ ('\u{1ef4}', 7925), ('\u{1ef6}', 7927), ('\u{1ef8}', 7929), ('\u{1efa}', 7931),
+ ('\u{1efc}', 7933), ('\u{1efe}', 7935), ('\u{1f08}', 7936), ('\u{1f09}', 7937),
+ ('\u{1f0a}', 7938), ('\u{1f0b}', 7939), ('\u{1f0c}', 7940), ('\u{1f0d}', 7941),
+ ('\u{1f0e}', 7942), ('\u{1f0f}', 7943), ('\u{1f18}', 7952), ('\u{1f19}', 7953),
+ ('\u{1f1a}', 7954), ('\u{1f1b}', 7955), ('\u{1f1c}', 7956), ('\u{1f1d}', 7957),
+ ('\u{1f28}', 7968), ('\u{1f29}', 7969), ('\u{1f2a}', 7970), ('\u{1f2b}', 7971),
+ ('\u{1f2c}', 7972), ('\u{1f2d}', 7973), ('\u{1f2e}', 7974), ('\u{1f2f}', 7975),
+ ('\u{1f38}', 7984), ('\u{1f39}', 7985), ('\u{1f3a}', 7986), ('\u{1f3b}', 7987),
+ ('\u{1f3c}', 7988), ('\u{1f3d}', 7989), ('\u{1f3e}', 7990), ('\u{1f3f}', 7991),
+ ('\u{1f48}', 8000), ('\u{1f49}', 8001), ('\u{1f4a}', 8002), ('\u{1f4b}', 8003),
+ ('\u{1f4c}', 8004), ('\u{1f4d}', 8005), ('\u{1f59}', 8017), ('\u{1f5b}', 8019),
+ ('\u{1f5d}', 8021), ('\u{1f5f}', 8023), ('\u{1f68}', 8032), ('\u{1f69}', 8033),
+ ('\u{1f6a}', 8034), ('\u{1f6b}', 8035), ('\u{1f6c}', 8036), ('\u{1f6d}', 8037),
+ ('\u{1f6e}', 8038), ('\u{1f6f}', 8039), ('\u{1f88}', 8064), ('\u{1f89}', 8065),
+ ('\u{1f8a}', 8066), ('\u{1f8b}', 8067), ('\u{1f8c}', 8068), ('\u{1f8d}', 8069),
+ ('\u{1f8e}', 8070), ('\u{1f8f}', 8071), ('\u{1f98}', 8080), ('\u{1f99}', 8081),
+ ('\u{1f9a}', 8082), ('\u{1f9b}', 8083), ('\u{1f9c}', 8084), ('\u{1f9d}', 8085),
+ ('\u{1f9e}', 8086), ('\u{1f9f}', 8087), ('\u{1fa8}', 8096), ('\u{1fa9}', 8097),
+ ('\u{1faa}', 8098), ('\u{1fab}', 8099), ('\u{1fac}', 8100), ('\u{1fad}', 8101),
+ ('\u{1fae}', 8102), ('\u{1faf}', 8103), ('\u{1fb8}', 8112), ('\u{1fb9}', 8113),
+ ('\u{1fba}', 8048), ('\u{1fbb}', 8049), ('\u{1fbc}', 8115), ('\u{1fc8}', 8050),
+ ('\u{1fc9}', 8051), ('\u{1fca}', 8052), ('\u{1fcb}', 8053), ('\u{1fcc}', 8131),
+ ('\u{1fd8}', 8144), ('\u{1fd9}', 8145), ('\u{1fda}', 8054), ('\u{1fdb}', 8055),
+ ('\u{1fe8}', 8160), ('\u{1fe9}', 8161), ('\u{1fea}', 8058), ('\u{1feb}', 8059),
+ ('\u{1fec}', 8165), ('\u{1ff8}', 8056), ('\u{1ff9}', 8057), ('\u{1ffa}', 8060),
+ ('\u{1ffb}', 8061), ('\u{1ffc}', 8179), ('\u{2126}', 969), ('\u{212a}', 107),
+ ('\u{212b}', 229), ('\u{2132}', 8526), ('\u{2160}', 8560), ('\u{2161}', 8561),
+ ('\u{2162}', 8562), ('\u{2163}', 8563), ('\u{2164}', 8564), ('\u{2165}', 8565),
+ ('\u{2166}', 8566), ('\u{2167}', 8567), ('\u{2168}', 8568), ('\u{2169}', 8569),
+ ('\u{216a}', 8570), ('\u{216b}', 8571), ('\u{216c}', 8572), ('\u{216d}', 8573),
+ ('\u{216e}', 8574), ('\u{216f}', 8575), ('\u{2183}', 8580), ('\u{24b6}', 9424),
+ ('\u{24b7}', 9425), ('\u{24b8}', 9426), ('\u{24b9}', 9427), ('\u{24ba}', 9428),
+ ('\u{24bb}', 9429), ('\u{24bc}', 9430), ('\u{24bd}', 9431), ('\u{24be}', 9432),
+ ('\u{24bf}', 9433), ('\u{24c0}', 9434), ('\u{24c1}', 9435), ('\u{24c2}', 9436),
+ ('\u{24c3}', 9437), ('\u{24c4}', 9438), ('\u{24c5}', 9439), ('\u{24c6}', 9440),
+ ('\u{24c7}', 9441), ('\u{24c8}', 9442), ('\u{24c9}', 9443), ('\u{24ca}', 9444),
+ ('\u{24cb}', 9445), ('\u{24cc}', 9446), ('\u{24cd}', 9447), ('\u{24ce}', 9448),
+ ('\u{24cf}', 9449), ('\u{2c00}', 11312), ('\u{2c01}', 11313), ('\u{2c02}', 11314),
+ ('\u{2c03}', 11315), ('\u{2c04}', 11316), ('\u{2c05}', 11317), ('\u{2c06}', 11318),
+ ('\u{2c07}', 11319), ('\u{2c08}', 11320), ('\u{2c09}', 11321), ('\u{2c0a}', 11322),
+ ('\u{2c0b}', 11323), ('\u{2c0c}', 11324), ('\u{2c0d}', 11325), ('\u{2c0e}', 11326),
+ ('\u{2c0f}', 11327), ('\u{2c10}', 11328), ('\u{2c11}', 11329), ('\u{2c12}', 11330),
+ ('\u{2c13}', 11331), ('\u{2c14}', 11332), ('\u{2c15}', 11333), ('\u{2c16}', 11334),
+ ('\u{2c17}', 11335), ('\u{2c18}', 11336), ('\u{2c19}', 11337), ('\u{2c1a}', 11338),
+ ('\u{2c1b}', 11339), ('\u{2c1c}', 11340), ('\u{2c1d}', 11341), ('\u{2c1e}', 11342),
+ ('\u{2c1f}', 11343), ('\u{2c20}', 11344), ('\u{2c21}', 11345), ('\u{2c22}', 11346),
+ ('\u{2c23}', 11347), ('\u{2c24}', 11348), ('\u{2c25}', 11349), ('\u{2c26}', 11350),
+ ('\u{2c27}', 11351), ('\u{2c28}', 11352), ('\u{2c29}', 11353), ('\u{2c2a}', 11354),
+ ('\u{2c2b}', 11355), ('\u{2c2c}', 11356), ('\u{2c2d}', 11357), ('\u{2c2e}', 11358),
+ ('\u{2c2f}', 11359), ('\u{2c60}', 11361), ('\u{2c62}', 619), ('\u{2c63}', 7549),
+ ('\u{2c64}', 637), ('\u{2c67}', 11368), ('\u{2c69}', 11370), ('\u{2c6b}', 11372),
+ ('\u{2c6d}', 593), ('\u{2c6e}', 625), ('\u{2c6f}', 592), ('\u{2c70}', 594),
+ ('\u{2c72}', 11379), ('\u{2c75}', 11382), ('\u{2c7e}', 575), ('\u{2c7f}', 576),
+ ('\u{2c80}', 11393), ('\u{2c82}', 11395), ('\u{2c84}', 11397), ('\u{2c86}', 11399),
+ ('\u{2c88}', 11401), ('\u{2c8a}', 11403), ('\u{2c8c}', 11405), ('\u{2c8e}', 11407),
+ ('\u{2c90}', 11409), ('\u{2c92}', 11411), ('\u{2c94}', 11413), ('\u{2c96}', 11415),
+ ('\u{2c98}', 11417), ('\u{2c9a}', 11419), ('\u{2c9c}', 11421), ('\u{2c9e}', 11423),
+ ('\u{2ca0}', 11425), ('\u{2ca2}', 11427), ('\u{2ca4}', 11429), ('\u{2ca6}', 11431),
+ ('\u{2ca8}', 11433), ('\u{2caa}', 11435), ('\u{2cac}', 11437), ('\u{2cae}', 11439),
+ ('\u{2cb0}', 11441), ('\u{2cb2}', 11443), ('\u{2cb4}', 11445), ('\u{2cb6}', 11447),
+ ('\u{2cb8}', 11449), ('\u{2cba}', 11451), ('\u{2cbc}', 11453), ('\u{2cbe}', 11455),
+ ('\u{2cc0}', 11457), ('\u{2cc2}', 11459), ('\u{2cc4}', 11461), ('\u{2cc6}', 11463),
+ ('\u{2cc8}', 11465), ('\u{2cca}', 11467), ('\u{2ccc}', 11469), ('\u{2cce}', 11471),
+ ('\u{2cd0}', 11473), ('\u{2cd2}', 11475), ('\u{2cd4}', 11477), ('\u{2cd6}', 11479),
+ ('\u{2cd8}', 11481), ('\u{2cda}', 11483), ('\u{2cdc}', 11485), ('\u{2cde}', 11487),
+ ('\u{2ce0}', 11489), ('\u{2ce2}', 11491), ('\u{2ceb}', 11500), ('\u{2ced}', 11502),
+ ('\u{2cf2}', 11507), ('\u{a640}', 42561), ('\u{a642}', 42563), ('\u{a644}', 42565),
+ ('\u{a646}', 42567), ('\u{a648}', 42569), ('\u{a64a}', 42571), ('\u{a64c}', 42573),
+ ('\u{a64e}', 42575), ('\u{a650}', 42577), ('\u{a652}', 42579), ('\u{a654}', 42581),
+ ('\u{a656}', 42583), ('\u{a658}', 42585), ('\u{a65a}', 42587), ('\u{a65c}', 42589),
+ ('\u{a65e}', 42591), ('\u{a660}', 42593), ('\u{a662}', 42595), ('\u{a664}', 42597),
+ ('\u{a666}', 42599), ('\u{a668}', 42601), ('\u{a66a}', 42603), ('\u{a66c}', 42605),
+ ('\u{a680}', 42625), ('\u{a682}', 42627), ('\u{a684}', 42629), ('\u{a686}', 42631),
+ ('\u{a688}', 42633), ('\u{a68a}', 42635), ('\u{a68c}', 42637), ('\u{a68e}', 42639),
+ ('\u{a690}', 42641), ('\u{a692}', 42643), ('\u{a694}', 42645), ('\u{a696}', 42647),
+ ('\u{a698}', 42649), ('\u{a69a}', 42651), ('\u{a722}', 42787), ('\u{a724}', 42789),
+ ('\u{a726}', 42791), ('\u{a728}', 42793), ('\u{a72a}', 42795), ('\u{a72c}', 42797),
+ ('\u{a72e}', 42799), ('\u{a732}', 42803), ('\u{a734}', 42805), ('\u{a736}', 42807),
+ ('\u{a738}', 42809), ('\u{a73a}', 42811), ('\u{a73c}', 42813), ('\u{a73e}', 42815),
+ ('\u{a740}', 42817), ('\u{a742}', 42819), ('\u{a744}', 42821), ('\u{a746}', 42823),
+ ('\u{a748}', 42825), ('\u{a74a}', 42827), ('\u{a74c}', 42829), ('\u{a74e}', 42831),
+ ('\u{a750}', 42833), ('\u{a752}', 42835), ('\u{a754}', 42837), ('\u{a756}', 42839),
+ ('\u{a758}', 42841), ('\u{a75a}', 42843), ('\u{a75c}', 42845), ('\u{a75e}', 42847),
+ ('\u{a760}', 42849), ('\u{a762}', 42851), ('\u{a764}', 42853), ('\u{a766}', 42855),
+ ('\u{a768}', 42857), ('\u{a76a}', 42859), ('\u{a76c}', 42861), ('\u{a76e}', 42863),
+ ('\u{a779}', 42874), ('\u{a77b}', 42876), ('\u{a77d}', 7545), ('\u{a77e}', 42879),
+ ('\u{a780}', 42881), ('\u{a782}', 42883), ('\u{a784}', 42885), ('\u{a786}', 42887),
+ ('\u{a78b}', 42892), ('\u{a78d}', 613), ('\u{a790}', 42897), ('\u{a792}', 42899),
+ ('\u{a796}', 42903), ('\u{a798}', 42905), ('\u{a79a}', 42907), ('\u{a79c}', 42909),
+ ('\u{a79e}', 42911), ('\u{a7a0}', 42913), ('\u{a7a2}', 42915), ('\u{a7a4}', 42917),
+ ('\u{a7a6}', 42919), ('\u{a7a8}', 42921), ('\u{a7aa}', 614), ('\u{a7ab}', 604),
+ ('\u{a7ac}', 609), ('\u{a7ad}', 620), ('\u{a7ae}', 618), ('\u{a7b0}', 670),
+ ('\u{a7b1}', 647), ('\u{a7b2}', 669), ('\u{a7b3}', 43859), ('\u{a7b4}', 42933),
+ ('\u{a7b6}', 42935), ('\u{a7b8}', 42937), ('\u{a7ba}', 42939), ('\u{a7bc}', 42941),
+ ('\u{a7be}', 42943), ('\u{a7c0}', 42945), ('\u{a7c2}', 42947), ('\u{a7c4}', 42900),
+ ('\u{a7c5}', 642), ('\u{a7c6}', 7566), ('\u{a7c7}', 42952), ('\u{a7c9}', 42954),
+ ('\u{a7d0}', 42961), ('\u{a7d6}', 42967), ('\u{a7d8}', 42969), ('\u{a7f5}', 42998),
+ ('\u{ff21}', 65345), ('\u{ff22}', 65346), ('\u{ff23}', 65347), ('\u{ff24}', 65348),
+ ('\u{ff25}', 65349), ('\u{ff26}', 65350), ('\u{ff27}', 65351), ('\u{ff28}', 65352),
+ ('\u{ff29}', 65353), ('\u{ff2a}', 65354), ('\u{ff2b}', 65355), ('\u{ff2c}', 65356),
+ ('\u{ff2d}', 65357), ('\u{ff2e}', 65358), ('\u{ff2f}', 65359), ('\u{ff30}', 65360),
+ ('\u{ff31}', 65361), ('\u{ff32}', 65362), ('\u{ff33}', 65363), ('\u{ff34}', 65364),
+ ('\u{ff35}', 65365), ('\u{ff36}', 65366), ('\u{ff37}', 65367), ('\u{ff38}', 65368),
+ ('\u{ff39}', 65369), ('\u{ff3a}', 65370), ('\u{10400}', 66600), ('\u{10401}', 66601),
+ ('\u{10402}', 66602), ('\u{10403}', 66603), ('\u{10404}', 66604), ('\u{10405}', 66605),
+ ('\u{10406}', 66606), ('\u{10407}', 66607), ('\u{10408}', 66608), ('\u{10409}', 66609),
+ ('\u{1040a}', 66610), ('\u{1040b}', 66611), ('\u{1040c}', 66612), ('\u{1040d}', 66613),
+ ('\u{1040e}', 66614), ('\u{1040f}', 66615), ('\u{10410}', 66616), ('\u{10411}', 66617),
+ ('\u{10412}', 66618), ('\u{10413}', 66619), ('\u{10414}', 66620), ('\u{10415}', 66621),
+ ('\u{10416}', 66622), ('\u{10417}', 66623), ('\u{10418}', 66624), ('\u{10419}', 66625),
+ ('\u{1041a}', 66626), ('\u{1041b}', 66627), ('\u{1041c}', 66628), ('\u{1041d}', 66629),
+ ('\u{1041e}', 66630), ('\u{1041f}', 66631), ('\u{10420}', 66632), ('\u{10421}', 66633),
+ ('\u{10422}', 66634), ('\u{10423}', 66635), ('\u{10424}', 66636), ('\u{10425}', 66637),
+ ('\u{10426}', 66638), ('\u{10427}', 66639), ('\u{104b0}', 66776), ('\u{104b1}', 66777),
+ ('\u{104b2}', 66778), ('\u{104b3}', 66779), ('\u{104b4}', 66780), ('\u{104b5}', 66781),
+ ('\u{104b6}', 66782), ('\u{104b7}', 66783), ('\u{104b8}', 66784), ('\u{104b9}', 66785),
+ ('\u{104ba}', 66786), ('\u{104bb}', 66787), ('\u{104bc}', 66788), ('\u{104bd}', 66789),
+ ('\u{104be}', 66790), ('\u{104bf}', 66791), ('\u{104c0}', 66792), ('\u{104c1}', 66793),
+ ('\u{104c2}', 66794), ('\u{104c3}', 66795), ('\u{104c4}', 66796), ('\u{104c5}', 66797),
+ ('\u{104c6}', 66798), ('\u{104c7}', 66799), ('\u{104c8}', 66800), ('\u{104c9}', 66801),
+ ('\u{104ca}', 66802), ('\u{104cb}', 66803), ('\u{104cc}', 66804), ('\u{104cd}', 66805),
+ ('\u{104ce}', 66806), ('\u{104cf}', 66807), ('\u{104d0}', 66808), ('\u{104d1}', 66809),
+ ('\u{104d2}', 66810), ('\u{104d3}', 66811), ('\u{10570}', 66967), ('\u{10571}', 66968),
+ ('\u{10572}', 66969), ('\u{10573}', 66970), ('\u{10574}', 66971), ('\u{10575}', 66972),
+ ('\u{10576}', 66973), ('\u{10577}', 66974), ('\u{10578}', 66975), ('\u{10579}', 66976),
+ ('\u{1057a}', 66977), ('\u{1057c}', 66979), ('\u{1057d}', 66980), ('\u{1057e}', 66981),
+ ('\u{1057f}', 66982), ('\u{10580}', 66983), ('\u{10581}', 66984), ('\u{10582}', 66985),
+ ('\u{10583}', 66986), ('\u{10584}', 66987), ('\u{10585}', 66988), ('\u{10586}', 66989),
+ ('\u{10587}', 66990), ('\u{10588}', 66991), ('\u{10589}', 66992), ('\u{1058a}', 66993),
+ ('\u{1058c}', 66995), ('\u{1058d}', 66996), ('\u{1058e}', 66997), ('\u{1058f}', 66998),
+ ('\u{10590}', 66999), ('\u{10591}', 67000), ('\u{10592}', 67001), ('\u{10594}', 67003),
+ ('\u{10595}', 67004), ('\u{10c80}', 68800), ('\u{10c81}', 68801), ('\u{10c82}', 68802),
+ ('\u{10c83}', 68803), ('\u{10c84}', 68804), ('\u{10c85}', 68805), ('\u{10c86}', 68806),
+ ('\u{10c87}', 68807), ('\u{10c88}', 68808), ('\u{10c89}', 68809), ('\u{10c8a}', 68810),
+ ('\u{10c8b}', 68811), ('\u{10c8c}', 68812), ('\u{10c8d}', 68813), ('\u{10c8e}', 68814),
+ ('\u{10c8f}', 68815), ('\u{10c90}', 68816), ('\u{10c91}', 68817), ('\u{10c92}', 68818),
+ ('\u{10c93}', 68819), ('\u{10c94}', 68820), ('\u{10c95}', 68821), ('\u{10c96}', 68822),
+ ('\u{10c97}', 68823), ('\u{10c98}', 68824), ('\u{10c99}', 68825), ('\u{10c9a}', 68826),
+ ('\u{10c9b}', 68827), ('\u{10c9c}', 68828), ('\u{10c9d}', 68829), ('\u{10c9e}', 68830),
+ ('\u{10c9f}', 68831), ('\u{10ca0}', 68832), ('\u{10ca1}', 68833), ('\u{10ca2}', 68834),
+ ('\u{10ca3}', 68835), ('\u{10ca4}', 68836), ('\u{10ca5}', 68837), ('\u{10ca6}', 68838),
+ ('\u{10ca7}', 68839), ('\u{10ca8}', 68840), ('\u{10ca9}', 68841), ('\u{10caa}', 68842),
+ ('\u{10cab}', 68843), ('\u{10cac}', 68844), ('\u{10cad}', 68845), ('\u{10cae}', 68846),
+ ('\u{10caf}', 68847), ('\u{10cb0}', 68848), ('\u{10cb1}', 68849), ('\u{10cb2}', 68850),
+ ('\u{118a0}', 71872), ('\u{118a1}', 71873), ('\u{118a2}', 71874), ('\u{118a3}', 71875),
+ ('\u{118a4}', 71876), ('\u{118a5}', 71877), ('\u{118a6}', 71878), ('\u{118a7}', 71879),
+ ('\u{118a8}', 71880), ('\u{118a9}', 71881), ('\u{118aa}', 71882), ('\u{118ab}', 71883),
+ ('\u{118ac}', 71884), ('\u{118ad}', 71885), ('\u{118ae}', 71886), ('\u{118af}', 71887),
+ ('\u{118b0}', 71888), ('\u{118b1}', 71889), ('\u{118b2}', 71890), ('\u{118b3}', 71891),
+ ('\u{118b4}', 71892), ('\u{118b5}', 71893), ('\u{118b6}', 71894), ('\u{118b7}', 71895),
+ ('\u{118b8}', 71896), ('\u{118b9}', 71897), ('\u{118ba}', 71898), ('\u{118bb}', 71899),
+ ('\u{118bc}', 71900), ('\u{118bd}', 71901), ('\u{118be}', 71902), ('\u{118bf}', 71903),
+ ('\u{16e40}', 93792), ('\u{16e41}', 93793), ('\u{16e42}', 93794), ('\u{16e43}', 93795),
+ ('\u{16e44}', 93796), ('\u{16e45}', 93797), ('\u{16e46}', 93798), ('\u{16e47}', 93799),
+ ('\u{16e48}', 93800), ('\u{16e49}', 93801), ('\u{16e4a}', 93802), ('\u{16e4b}', 93803),
+ ('\u{16e4c}', 93804), ('\u{16e4d}', 93805), ('\u{16e4e}', 93806), ('\u{16e4f}', 93807),
+ ('\u{16e50}', 93808), ('\u{16e51}', 93809), ('\u{16e52}', 93810), ('\u{16e53}', 93811),
+ ('\u{16e54}', 93812), ('\u{16e55}', 93813), ('\u{16e56}', 93814), ('\u{16e57}', 93815),
+ ('\u{16e58}', 93816), ('\u{16e59}', 93817), ('\u{16e5a}', 93818), ('\u{16e5b}', 93819),
+ ('\u{16e5c}', 93820), ('\u{16e5d}', 93821), ('\u{16e5e}', 93822), ('\u{16e5f}', 93823),
+ ('\u{1e900}', 125218), ('\u{1e901}', 125219), ('\u{1e902}', 125220), ('\u{1e903}', 125221),
+ ('\u{1e904}', 125222), ('\u{1e905}', 125223), ('\u{1e906}', 125224), ('\u{1e907}', 125225),
+ ('\u{1e908}', 125226), ('\u{1e909}', 125227), ('\u{1e90a}', 125228), ('\u{1e90b}', 125229),
+ ('\u{1e90c}', 125230), ('\u{1e90d}', 125231), ('\u{1e90e}', 125232), ('\u{1e90f}', 125233),
+ ('\u{1e910}', 125234), ('\u{1e911}', 125235), ('\u{1e912}', 125236), ('\u{1e913}', 125237),
+ ('\u{1e914}', 125238), ('\u{1e915}', 125239), ('\u{1e916}', 125240), ('\u{1e917}', 125241),
+ ('\u{1e918}', 125242), ('\u{1e919}', 125243), ('\u{1e91a}', 125244), ('\u{1e91b}', 125245),
+ ('\u{1e91c}', 125246), ('\u{1e91d}', 125247), ('\u{1e91e}', 125248), ('\u{1e91f}', 125249),
+ ('\u{1e920}', 125250), ('\u{1e921}', 125251),
+ ];
+
+ static LOWERCASE_TABLE_MULTI: &[[char; 3]] = &[
+ ['i', '\u{307}', '\u{0}'],
+ ];
+
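(Aside, not part of the commit: a minimal decoding sketch for the layout these regenerated tables suggest. The new tables store (char, u32) pairs instead of (char, [char; 3]); values at or above 4_194_304 (1 << 22, safely past char::MAX) appear to index a companion *_TABLE_MULTI of [char; 3] expansions — compare the old entry ('\u{df}', ['S', 'S', '\u{0}']) with the new ('\u{df}', 4194304), and ('\u{149}', ['\u{2bc}', 'N', '\u{0}']) with ('\u{149}', 4194305). The constant and the lookup shape are assumptions inferred from the data, not a quote of the generated code.

    // Hypothetical decoder over the regenerated (char, u32) tables.
    fn lookup(table: &[(char, u32)], multi: &[[char; 3]], c: char) -> [char; 3] {
        match table.binary_search_by(|&(key, _)| key.cmp(&c)) {
            Ok(i) => {
                let mapped = table[i].1;
                if mapped >= 4_194_304 {
                    // Multi-char mapping: the offset selects a [char; 3] row,
                    // padded with '\u{0}' when the expansion is shorter.
                    multi[(mapped - 4_194_304) as usize]
                } else {
                    // Single-char mapping: the u32 is the scalar value itself.
                    [char::from_u32(mapped).unwrap(), '\u{0}', '\u{0}']
                }
            }
            // Characters absent from the table case-map to themselves.
            Err(_) => [c, '\u{0}', '\u{0}'],
        }
    }

Under that assumption, lookup(UPPERCASE_TABLE, UPPERCASE_TABLE_MULTI, '\u{df}') would yield ['S', 'S', '\u{0}'], with UPPERCASE_TABLE_MULTI presumably defined symmetrically to the LOWERCASE_TABLE_MULTI shown above but outside this hunk.)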
+ static UPPERCASE_TABLE: &[(char, u32)] = &[
+ ('\u{b5}', 924), ('\u{df}', 4194304), ('\u{e0}', 192), ('\u{e1}', 193), ('\u{e2}', 194),
+ ('\u{e3}', 195), ('\u{e4}', 196), ('\u{e5}', 197), ('\u{e6}', 198), ('\u{e7}', 199),
+ ('\u{e8}', 200), ('\u{e9}', 201), ('\u{ea}', 202), ('\u{eb}', 203), ('\u{ec}', 204),
+ ('\u{ed}', 205), ('\u{ee}', 206), ('\u{ef}', 207), ('\u{f0}', 208), ('\u{f1}', 209),
+ ('\u{f2}', 210), ('\u{f3}', 211), ('\u{f4}', 212), ('\u{f5}', 213), ('\u{f6}', 214),
+ ('\u{f8}', 216), ('\u{f9}', 217), ('\u{fa}', 218), ('\u{fb}', 219), ('\u{fc}', 220),
+ ('\u{fd}', 221), ('\u{fe}', 222), ('\u{ff}', 376), ('\u{101}', 256), ('\u{103}', 258),
+ ('\u{105}', 260), ('\u{107}', 262), ('\u{109}', 264), ('\u{10b}', 266), ('\u{10d}', 268),
+ ('\u{10f}', 270), ('\u{111}', 272), ('\u{113}', 274), ('\u{115}', 276), ('\u{117}', 278),
+ ('\u{119}', 280), ('\u{11b}', 282), ('\u{11d}', 284), ('\u{11f}', 286), ('\u{121}', 288),
+ ('\u{123}', 290), ('\u{125}', 292), ('\u{127}', 294), ('\u{129}', 296), ('\u{12b}', 298),
+ ('\u{12d}', 300), ('\u{12f}', 302), ('\u{131}', 73), ('\u{133}', 306), ('\u{135}', 308),
+ ('\u{137}', 310), ('\u{13a}', 313), ('\u{13c}', 315), ('\u{13e}', 317), ('\u{140}', 319),
+ ('\u{142}', 321), ('\u{144}', 323), ('\u{146}', 325), ('\u{148}', 327),
+ ('\u{149}', 4194305), ('\u{14b}', 330), ('\u{14d}', 332), ('\u{14f}', 334),
+ ('\u{151}', 336), ('\u{153}', 338), ('\u{155}', 340), ('\u{157}', 342), ('\u{159}', 344),
+ ('\u{15b}', 346), ('\u{15d}', 348), ('\u{15f}', 350), ('\u{161}', 352), ('\u{163}', 354),
+ ('\u{165}', 356), ('\u{167}', 358), ('\u{169}', 360), ('\u{16b}', 362), ('\u{16d}', 364),
+ ('\u{16f}', 366), ('\u{171}', 368), ('\u{173}', 370), ('\u{175}', 372), ('\u{177}', 374),
+ ('\u{17a}', 377), ('\u{17c}', 379), ('\u{17e}', 381), ('\u{17f}', 83), ('\u{180}', 579),
+ ('\u{183}', 386), ('\u{185}', 388), ('\u{188}', 391), ('\u{18c}', 395), ('\u{192}', 401),
+ ('\u{195}', 502), ('\u{199}', 408), ('\u{19a}', 573), ('\u{19e}', 544), ('\u{1a1}', 416),
+ ('\u{1a3}', 418), ('\u{1a5}', 420), ('\u{1a8}', 423), ('\u{1ad}', 428), ('\u{1b0}', 431),
+ ('\u{1b4}', 435), ('\u{1b6}', 437), ('\u{1b9}', 440), ('\u{1bd}', 444), ('\u{1bf}', 503),
+ ('\u{1c5}', 452), ('\u{1c6}', 452), ('\u{1c8}', 455), ('\u{1c9}', 455), ('\u{1cb}', 458),
+ ('\u{1cc}', 458), ('\u{1ce}', 461), ('\u{1d0}', 463), ('\u{1d2}', 465), ('\u{1d4}', 467),
+ ('\u{1d6}', 469), ('\u{1d8}', 471), ('\u{1da}', 473), ('\u{1dc}', 475), ('\u{1dd}', 398),
+ ('\u{1df}', 478), ('\u{1e1}', 480), ('\u{1e3}', 482), ('\u{1e5}', 484), ('\u{1e7}', 486),
+ ('\u{1e9}', 488), ('\u{1eb}', 490), ('\u{1ed}', 492), ('\u{1ef}', 494),
+ ('\u{1f0}', 4194306), ('\u{1f2}', 497), ('\u{1f3}', 497), ('\u{1f5}', 500),
+ ('\u{1f9}', 504), ('\u{1fb}', 506), ('\u{1fd}', 508), ('\u{1ff}', 510), ('\u{201}', 512),
+ ('\u{203}', 514), ('\u{205}', 516), ('\u{207}', 518), ('\u{209}', 520), ('\u{20b}', 522),
+ ('\u{20d}', 524), ('\u{20f}', 526), ('\u{211}', 528), ('\u{213}', 530), ('\u{215}', 532),
+ ('\u{217}', 534), ('\u{219}', 536), ('\u{21b}', 538), ('\u{21d}', 540), ('\u{21f}', 542),
+ ('\u{223}', 546), ('\u{225}', 548), ('\u{227}', 550), ('\u{229}', 552), ('\u{22b}', 554),
+ ('\u{22d}', 556), ('\u{22f}', 558), ('\u{231}', 560), ('\u{233}', 562), ('\u{23c}', 571),
+ ('\u{23f}', 11390), ('\u{240}', 11391), ('\u{242}', 577), ('\u{247}', 582),
+ ('\u{249}', 584), ('\u{24b}', 586), ('\u{24d}', 588), ('\u{24f}', 590), ('\u{250}', 11375),
+ ('\u{251}', 11373), ('\u{252}', 11376), ('\u{253}', 385), ('\u{254}', 390),
+ ('\u{256}', 393), ('\u{257}', 394), ('\u{259}', 399), ('\u{25b}', 400), ('\u{25c}', 42923),
+ ('\u{260}', 403), ('\u{261}', 42924), ('\u{263}', 404), ('\u{265}', 42893),
+ ('\u{266}', 42922), ('\u{268}', 407), ('\u{269}', 406), ('\u{26a}', 42926),
+ ('\u{26b}', 11362), ('\u{26c}', 42925), ('\u{26f}', 412), ('\u{271}', 11374),
+ ('\u{272}', 413), ('\u{275}', 415), ('\u{27d}', 11364), ('\u{280}', 422),
+ ('\u{282}', 42949), ('\u{283}', 425), ('\u{287}', 42929), ('\u{288}', 430),
+ ('\u{289}', 580), ('\u{28a}', 433), ('\u{28b}', 434), ('\u{28c}', 581), ('\u{292}', 439),
+ ('\u{29d}', 42930), ('\u{29e}', 42928), ('\u{345}', 921), ('\u{371}', 880),
+ ('\u{373}', 882), ('\u{377}', 886), ('\u{37b}', 1021), ('\u{37c}', 1022), ('\u{37d}', 1023),
+ ('\u{390}', 4194307), ('\u{3ac}', 902), ('\u{3ad}', 904), ('\u{3ae}', 905),
+ ('\u{3af}', 906), ('\u{3b0}', 4194308), ('\u{3b1}', 913), ('\u{3b2}', 914),
+ ('\u{3b3}', 915), ('\u{3b4}', 916), ('\u{3b5}', 917), ('\u{3b6}', 918), ('\u{3b7}', 919),
+ ('\u{3b8}', 920), ('\u{3b9}', 921), ('\u{3ba}', 922), ('\u{3bb}', 923), ('\u{3bc}', 924),
+ ('\u{3bd}', 925), ('\u{3be}', 926), ('\u{3bf}', 927), ('\u{3c0}', 928), ('\u{3c1}', 929),
+ ('\u{3c2}', 931), ('\u{3c3}', 931), ('\u{3c4}', 932), ('\u{3c5}', 933), ('\u{3c6}', 934),
+ ('\u{3c7}', 935), ('\u{3c8}', 936), ('\u{3c9}', 937), ('\u{3ca}', 938), ('\u{3cb}', 939),
+ ('\u{3cc}', 908), ('\u{3cd}', 910), ('\u{3ce}', 911), ('\u{3d0}', 914), ('\u{3d1}', 920),
+ ('\u{3d5}', 934), ('\u{3d6}', 928), ('\u{3d7}', 975), ('\u{3d9}', 984), ('\u{3db}', 986),
+ ('\u{3dd}', 988), ('\u{3df}', 990), ('\u{3e1}', 992), ('\u{3e3}', 994), ('\u{3e5}', 996),
+ ('\u{3e7}', 998), ('\u{3e9}', 1000), ('\u{3eb}', 1002), ('\u{3ed}', 1004),
+ ('\u{3ef}', 1006), ('\u{3f0}', 922), ('\u{3f1}', 929), ('\u{3f2}', 1017), ('\u{3f3}', 895),
+ ('\u{3f5}', 917), ('\u{3f8}', 1015), ('\u{3fb}', 1018), ('\u{430}', 1040),
+ ('\u{431}', 1041), ('\u{432}', 1042), ('\u{433}', 1043), ('\u{434}', 1044),
+ ('\u{435}', 1045), ('\u{436}', 1046), ('\u{437}', 1047), ('\u{438}', 1048),
+ ('\u{439}', 1049), ('\u{43a}', 1050), ('\u{43b}', 1051), ('\u{43c}', 1052),
+ ('\u{43d}', 1053), ('\u{43e}', 1054), ('\u{43f}', 1055), ('\u{440}', 1056),
+ ('\u{441}', 1057), ('\u{442}', 1058), ('\u{443}', 1059), ('\u{444}', 1060),
+ ('\u{445}', 1061), ('\u{446}', 1062), ('\u{447}', 1063), ('\u{448}', 1064),
+ ('\u{449}', 1065), ('\u{44a}', 1066), ('\u{44b}', 1067), ('\u{44c}', 1068),
+ ('\u{44d}', 1069), ('\u{44e}', 1070), ('\u{44f}', 1071), ('\u{450}', 1024),
+ ('\u{451}', 1025), ('\u{452}', 1026), ('\u{453}', 1027), ('\u{454}', 1028),
+ ('\u{455}', 1029), ('\u{456}', 1030), ('\u{457}', 1031), ('\u{458}', 1032),
+ ('\u{459}', 1033), ('\u{45a}', 1034), ('\u{45b}', 1035), ('\u{45c}', 1036),
+ ('\u{45d}', 1037), ('\u{45e}', 1038), ('\u{45f}', 1039), ('\u{461}', 1120),
+ ('\u{463}', 1122), ('\u{465}', 1124), ('\u{467}', 1126), ('\u{469}', 1128),
+ ('\u{46b}', 1130), ('\u{46d}', 1132), ('\u{46f}', 1134), ('\u{471}', 1136),
+ ('\u{473}', 1138), ('\u{475}', 1140), ('\u{477}', 1142), ('\u{479}', 1144),
+ ('\u{47b}', 1146), ('\u{47d}', 1148), ('\u{47f}', 1150), ('\u{481}', 1152),
+ ('\u{48b}', 1162), ('\u{48d}', 1164), ('\u{48f}', 1166), ('\u{491}', 1168),
+ ('\u{493}', 1170), ('\u{495}', 1172), ('\u{497}', 1174), ('\u{499}', 1176),
+ ('\u{49b}', 1178), ('\u{49d}', 1180), ('\u{49f}', 1182), ('\u{4a1}', 1184),
+ ('\u{4a3}', 1186), ('\u{4a5}', 1188), ('\u{4a7}', 1190), ('\u{4a9}', 1192),
+ ('\u{4ab}', 1194), ('\u{4ad}', 1196), ('\u{4af}', 1198), ('\u{4b1}', 1200),
+ ('\u{4b3}', 1202), ('\u{4b5}', 1204), ('\u{4b7}', 1206), ('\u{4b9}', 1208),
+ ('\u{4bb}', 1210), ('\u{4bd}', 1212), ('\u{4bf}', 1214), ('\u{4c2}', 1217),
+ ('\u{4c4}', 1219), ('\u{4c6}', 1221), ('\u{4c8}', 1223), ('\u{4ca}', 1225),
+ ('\u{4cc}', 1227), ('\u{4ce}', 1229), ('\u{4cf}', 1216), ('\u{4d1}', 1232),
+ ('\u{4d3}', 1234), ('\u{4d5}', 1236), ('\u{4d7}', 1238), ('\u{4d9}', 1240),
+ ('\u{4db}', 1242), ('\u{4dd}', 1244), ('\u{4df}', 1246), ('\u{4e1}', 1248),
+ ('\u{4e3}', 1250), ('\u{4e5}', 1252), ('\u{4e7}', 1254), ('\u{4e9}', 1256),
+ ('\u{4eb}', 1258), ('\u{4ed}', 1260), ('\u{4ef}', 1262), ('\u{4f1}', 1264),
+ ('\u{4f3}', 1266), ('\u{4f5}', 1268), ('\u{4f7}', 1270), ('\u{4f9}', 1272),
+ ('\u{4fb}', 1274), ('\u{4fd}', 1276), ('\u{4ff}', 1278), ('\u{501}', 1280),
+ ('\u{503}', 1282), ('\u{505}', 1284), ('\u{507}', 1286), ('\u{509}', 1288),
+ ('\u{50b}', 1290), ('\u{50d}', 1292), ('\u{50f}', 1294), ('\u{511}', 1296),
+ ('\u{513}', 1298), ('\u{515}', 1300), ('\u{517}', 1302), ('\u{519}', 1304),
+ ('\u{51b}', 1306), ('\u{51d}', 1308), ('\u{51f}', 1310), ('\u{521}', 1312),
+ ('\u{523}', 1314), ('\u{525}', 1316), ('\u{527}', 1318), ('\u{529}', 1320),
+ ('\u{52b}', 1322), ('\u{52d}', 1324), ('\u{52f}', 1326), ('\u{561}', 1329),
+ ('\u{562}', 1330), ('\u{563}', 1331), ('\u{564}', 1332), ('\u{565}', 1333),
+ ('\u{566}', 1334), ('\u{567}', 1335), ('\u{568}', 1336), ('\u{569}', 1337),
+ ('\u{56a}', 1338), ('\u{56b}', 1339), ('\u{56c}', 1340), ('\u{56d}', 1341),
+ ('\u{56e}', 1342), ('\u{56f}', 1343), ('\u{570}', 1344), ('\u{571}', 1345),
+ ('\u{572}', 1346), ('\u{573}', 1347), ('\u{574}', 1348), ('\u{575}', 1349),
+ ('\u{576}', 1350), ('\u{577}', 1351), ('\u{578}', 1352), ('\u{579}', 1353),
+ ('\u{57a}', 1354), ('\u{57b}', 1355), ('\u{57c}', 1356), ('\u{57d}', 1357),
+ ('\u{57e}', 1358), ('\u{57f}', 1359), ('\u{580}', 1360), ('\u{581}', 1361),
+ ('\u{582}', 1362), ('\u{583}', 1363), ('\u{584}', 1364), ('\u{585}', 1365),
+ ('\u{586}', 1366), ('\u{587}', 4194309), ('\u{10d0}', 7312), ('\u{10d1}', 7313),
+ ('\u{10d2}', 7314), ('\u{10d3}', 7315), ('\u{10d4}', 7316), ('\u{10d5}', 7317),
+ ('\u{10d6}', 7318), ('\u{10d7}', 7319), ('\u{10d8}', 7320), ('\u{10d9}', 7321),
+ ('\u{10da}', 7322), ('\u{10db}', 7323), ('\u{10dc}', 7324), ('\u{10dd}', 7325),
+ ('\u{10de}', 7326), ('\u{10df}', 7327), ('\u{10e0}', 7328), ('\u{10e1}', 7329),
+ ('\u{10e2}', 7330), ('\u{10e3}', 7331), ('\u{10e4}', 7332), ('\u{10e5}', 7333),
+ ('\u{10e6}', 7334), ('\u{10e7}', 7335), ('\u{10e8}', 7336), ('\u{10e9}', 7337),
+ ('\u{10ea}', 7338), ('\u{10eb}', 7339), ('\u{10ec}', 7340), ('\u{10ed}', 7341),
+ ('\u{10ee}', 7342), ('\u{10ef}', 7343), ('\u{10f0}', 7344), ('\u{10f1}', 7345),
+ ('\u{10f2}', 7346), ('\u{10f3}', 7347), ('\u{10f4}', 7348), ('\u{10f5}', 7349),
+ ('\u{10f6}', 7350), ('\u{10f7}', 7351), ('\u{10f8}', 7352), ('\u{10f9}', 7353),
+ ('\u{10fa}', 7354), ('\u{10fd}', 7357), ('\u{10fe}', 7358), ('\u{10ff}', 7359),
+ ('\u{13f8}', 5104), ('\u{13f9}', 5105), ('\u{13fa}', 5106), ('\u{13fb}', 5107),
+ ('\u{13fc}', 5108), ('\u{13fd}', 5109), ('\u{1c80}', 1042), ('\u{1c81}', 1044),
+ ('\u{1c82}', 1054), ('\u{1c83}', 1057), ('\u{1c84}', 1058), ('\u{1c85}', 1058),
+ ('\u{1c86}', 1066), ('\u{1c87}', 1122), ('\u{1c88}', 42570), ('\u{1d79}', 42877),
+ ('\u{1d7d}', 11363), ('\u{1d8e}', 42950), ('\u{1e01}', 7680), ('\u{1e03}', 7682),
+ ('\u{1e05}', 7684), ('\u{1e07}', 7686), ('\u{1e09}', 7688), ('\u{1e0b}', 7690),
+ ('\u{1e0d}', 7692), ('\u{1e0f}', 7694), ('\u{1e11}', 7696), ('\u{1e13}', 7698),
+ ('\u{1e15}', 7700), ('\u{1e17}', 7702), ('\u{1e19}', 7704), ('\u{1e1b}', 7706),
+ ('\u{1e1d}', 7708), ('\u{1e1f}', 7710), ('\u{1e21}', 7712), ('\u{1e23}', 7714),
+ ('\u{1e25}', 7716), ('\u{1e27}', 7718), ('\u{1e29}', 7720), ('\u{1e2b}', 7722),
+ ('\u{1e2d}', 7724), ('\u{1e2f}', 7726), ('\u{1e31}', 7728), ('\u{1e33}', 7730),
+ ('\u{1e35}', 7732), ('\u{1e37}', 7734), ('\u{1e39}', 7736), ('\u{1e3b}', 7738),
+ ('\u{1e3d}', 7740), ('\u{1e3f}', 7742), ('\u{1e41}', 7744), ('\u{1e43}', 7746),
+ ('\u{1e45}', 7748), ('\u{1e47}', 7750), ('\u{1e49}', 7752), ('\u{1e4b}', 7754),
+ ('\u{1e4d}', 7756), ('\u{1e4f}', 7758), ('\u{1e51}', 7760), ('\u{1e53}', 7762),
+ ('\u{1e55}', 7764), ('\u{1e57}', 7766), ('\u{1e59}', 7768), ('\u{1e5b}', 7770),
+ ('\u{1e5d}', 7772), ('\u{1e5f}', 7774), ('\u{1e61}', 7776), ('\u{1e63}', 7778),
+ ('\u{1e65}', 7780), ('\u{1e67}', 7782), ('\u{1e69}', 7784), ('\u{1e6b}', 7786),
+ ('\u{1e6d}', 7788), ('\u{1e6f}', 7790), ('\u{1e71}', 7792), ('\u{1e73}', 7794),
+ ('\u{1e75}', 7796), ('\u{1e77}', 7798), ('\u{1e79}', 7800), ('\u{1e7b}', 7802),
+ ('\u{1e7d}', 7804), ('\u{1e7f}', 7806), ('\u{1e81}', 7808), ('\u{1e83}', 7810),
+ ('\u{1e85}', 7812), ('\u{1e87}', 7814), ('\u{1e89}', 7816), ('\u{1e8b}', 7818),
+ ('\u{1e8d}', 7820), ('\u{1e8f}', 7822), ('\u{1e91}', 7824), ('\u{1e93}', 7826),
+ ('\u{1e95}', 7828), ('\u{1e96}', 4194310), ('\u{1e97}', 4194311), ('\u{1e98}', 4194312),
+ ('\u{1e99}', 4194313), ('\u{1e9a}', 4194314), ('\u{1e9b}', 7776), ('\u{1ea1}', 7840),
+ ('\u{1ea3}', 7842), ('\u{1ea5}', 7844), ('\u{1ea7}', 7846), ('\u{1ea9}', 7848),
+ ('\u{1eab}', 7850), ('\u{1ead}', 7852), ('\u{1eaf}', 7854), ('\u{1eb1}', 7856),
+ ('\u{1eb3}', 7858), ('\u{1eb5}', 7860), ('\u{1eb7}', 7862), ('\u{1eb9}', 7864),
+ ('\u{1ebb}', 7866), ('\u{1ebd}', 7868), ('\u{1ebf}', 7870), ('\u{1ec1}', 7872),
+ ('\u{1ec3}', 7874), ('\u{1ec5}', 7876), ('\u{1ec7}', 7878), ('\u{1ec9}', 7880),
+ ('\u{1ecb}', 7882), ('\u{1ecd}', 7884), ('\u{1ecf}', 7886), ('\u{1ed1}', 7888),
+ ('\u{1ed3}', 7890), ('\u{1ed5}', 7892), ('\u{1ed7}', 7894), ('\u{1ed9}', 7896),
+ ('\u{1edb}', 7898), ('\u{1edd}', 7900), ('\u{1edf}', 7902), ('\u{1ee1}', 7904),
+ ('\u{1ee3}', 7906), ('\u{1ee5}', 7908), ('\u{1ee7}', 7910), ('\u{1ee9}', 7912),
+ ('\u{1eeb}', 7914), ('\u{1eed}', 7916), ('\u{1eef}', 7918), ('\u{1ef1}', 7920),
+ ('\u{1ef3}', 7922), ('\u{1ef5}', 7924), ('\u{1ef7}', 7926), ('\u{1ef9}', 7928),
+ ('\u{1efb}', 7930), ('\u{1efd}', 7932), ('\u{1eff}', 7934), ('\u{1f00}', 7944),
+ ('\u{1f01}', 7945), ('\u{1f02}', 7946), ('\u{1f03}', 7947), ('\u{1f04}', 7948),
+ ('\u{1f05}', 7949), ('\u{1f06}', 7950), ('\u{1f07}', 7951), ('\u{1f10}', 7960),
+ ('\u{1f11}', 7961), ('\u{1f12}', 7962), ('\u{1f13}', 7963), ('\u{1f14}', 7964),
+ ('\u{1f15}', 7965), ('\u{1f20}', 7976), ('\u{1f21}', 7977), ('\u{1f22}', 7978),
+ ('\u{1f23}', 7979), ('\u{1f24}', 7980), ('\u{1f25}', 7981), ('\u{1f26}', 7982),
+ ('\u{1f27}', 7983), ('\u{1f30}', 7992), ('\u{1f31}', 7993), ('\u{1f32}', 7994),
+ ('\u{1f33}', 7995), ('\u{1f34}', 7996), ('\u{1f35}', 7997), ('\u{1f36}', 7998),
+ ('\u{1f37}', 7999), ('\u{1f40}', 8008), ('\u{1f41}', 8009), ('\u{1f42}', 8010),
+ ('\u{1f43}', 8011), ('\u{1f44}', 8012), ('\u{1f45}', 8013), ('\u{1f50}', 4194315),
+ ('\u{1f51}', 8025), ('\u{1f52}', 4194316), ('\u{1f53}', 8027), ('\u{1f54}', 4194317),
+ ('\u{1f55}', 8029), ('\u{1f56}', 4194318), ('\u{1f57}', 8031), ('\u{1f60}', 8040),
+ ('\u{1f61}', 8041), ('\u{1f62}', 8042), ('\u{1f63}', 8043), ('\u{1f64}', 8044),
+ ('\u{1f65}', 8045), ('\u{1f66}', 8046), ('\u{1f67}', 8047), ('\u{1f70}', 8122),
+ ('\u{1f71}', 8123), ('\u{1f72}', 8136), ('\u{1f73}', 8137), ('\u{1f74}', 8138),
+ ('\u{1f75}', 8139), ('\u{1f76}', 8154), ('\u{1f77}', 8155), ('\u{1f78}', 8184),
+ ('\u{1f79}', 8185), ('\u{1f7a}', 8170), ('\u{1f7b}', 8171), ('\u{1f7c}', 8186),
+ ('\u{1f7d}', 8187), ('\u{1f80}', 4194319), ('\u{1f81}', 4194320), ('\u{1f82}', 4194321),
+ ('\u{1f83}', 4194322), ('\u{1f84}', 4194323), ('\u{1f85}', 4194324), ('\u{1f86}', 4194325),
+ ('\u{1f87}', 4194326), ('\u{1f88}', 4194327), ('\u{1f89}', 4194328), ('\u{1f8a}', 4194329),
+ ('\u{1f8b}', 4194330), ('\u{1f8c}', 4194331), ('\u{1f8d}', 4194332), ('\u{1f8e}', 4194333),
+ ('\u{1f8f}', 4194334), ('\u{1f90}', 4194335), ('\u{1f91}', 4194336), ('\u{1f92}', 4194337),
+ ('\u{1f93}', 4194338), ('\u{1f94}', 4194339), ('\u{1f95}', 4194340), ('\u{1f96}', 4194341),
+ ('\u{1f97}', 4194342), ('\u{1f98}', 4194343), ('\u{1f99}', 4194344), ('\u{1f9a}', 4194345),
+ ('\u{1f9b}', 4194346), ('\u{1f9c}', 4194347), ('\u{1f9d}', 4194348), ('\u{1f9e}', 4194349),
+ ('\u{1f9f}', 4194350), ('\u{1fa0}', 4194351), ('\u{1fa1}', 4194352), ('\u{1fa2}', 4194353),
+ ('\u{1fa3}', 4194354), ('\u{1fa4}', 4194355), ('\u{1fa5}', 4194356), ('\u{1fa6}', 4194357),
+ ('\u{1fa7}', 4194358), ('\u{1fa8}', 4194359), ('\u{1fa9}', 4194360), ('\u{1faa}', 4194361),
+ ('\u{1fab}', 4194362), ('\u{1fac}', 4194363), ('\u{1fad}', 4194364), ('\u{1fae}', 4194365),
+ ('\u{1faf}', 4194366), ('\u{1fb0}', 8120), ('\u{1fb1}', 8121), ('\u{1fb2}', 4194367),
+ ('\u{1fb3}', 4194368), ('\u{1fb4}', 4194369), ('\u{1fb6}', 4194370), ('\u{1fb7}', 4194371),
+ ('\u{1fbc}', 4194372), ('\u{1fbe}', 921), ('\u{1fc2}', 4194373), ('\u{1fc3}', 4194374),
+ ('\u{1fc4}', 4194375), ('\u{1fc6}', 4194376), ('\u{1fc7}', 4194377), ('\u{1fcc}', 4194378),
+ ('\u{1fd0}', 8152), ('\u{1fd1}', 8153), ('\u{1fd2}', 4194379), ('\u{1fd3}', 4194380),
+ ('\u{1fd6}', 4194381), ('\u{1fd7}', 4194382), ('\u{1fe0}', 8168), ('\u{1fe1}', 8169),
+ ('\u{1fe2}', 4194383), ('\u{1fe3}', 4194384), ('\u{1fe4}', 4194385), ('\u{1fe5}', 8172),
+ ('\u{1fe6}', 4194386), ('\u{1fe7}', 4194387), ('\u{1ff2}', 4194388), ('\u{1ff3}', 4194389),
+ ('\u{1ff4}', 4194390), ('\u{1ff6}', 4194391), ('\u{1ff7}', 4194392), ('\u{1ffc}', 4194393),
+ ('\u{214e}', 8498), ('\u{2170}', 8544), ('\u{2171}', 8545), ('\u{2172}', 8546),
+ ('\u{2173}', 8547), ('\u{2174}', 8548), ('\u{2175}', 8549), ('\u{2176}', 8550),
+ ('\u{2177}', 8551), ('\u{2178}', 8552), ('\u{2179}', 8553), ('\u{217a}', 8554),
+ ('\u{217b}', 8555), ('\u{217c}', 8556), ('\u{217d}', 8557), ('\u{217e}', 8558),
+ ('\u{217f}', 8559), ('\u{2184}', 8579), ('\u{24d0}', 9398), ('\u{24d1}', 9399),
+ ('\u{24d2}', 9400), ('\u{24d3}', 9401), ('\u{24d4}', 9402), ('\u{24d5}', 9403),
+ ('\u{24d6}', 9404), ('\u{24d7}', 9405), ('\u{24d8}', 9406), ('\u{24d9}', 9407),
+ ('\u{24da}', 9408), ('\u{24db}', 9409), ('\u{24dc}', 9410), ('\u{24dd}', 9411),
+ ('\u{24de}', 9412), ('\u{24df}', 9413), ('\u{24e0}', 9414), ('\u{24e1}', 9415),
+ ('\u{24e2}', 9416), ('\u{24e3}', 9417), ('\u{24e4}', 9418), ('\u{24e5}', 9419),
+ ('\u{24e6}', 9420), ('\u{24e7}', 9421), ('\u{24e8}', 9422), ('\u{24e9}', 9423),
+ ('\u{2c30}', 11264), ('\u{2c31}', 11265), ('\u{2c32}', 11266), ('\u{2c33}', 11267),
+ ('\u{2c34}', 11268), ('\u{2c35}', 11269), ('\u{2c36}', 11270), ('\u{2c37}', 11271),
+ ('\u{2c38}', 11272), ('\u{2c39}', 11273), ('\u{2c3a}', 11274), ('\u{2c3b}', 11275),
+ ('\u{2c3c}', 11276), ('\u{2c3d}', 11277), ('\u{2c3e}', 11278), ('\u{2c3f}', 11279),
+ ('\u{2c40}', 11280), ('\u{2c41}', 11281), ('\u{2c42}', 11282), ('\u{2c43}', 11283),
+ ('\u{2c44}', 11284), ('\u{2c45}', 11285), ('\u{2c46}', 11286), ('\u{2c47}', 11287),
+ ('\u{2c48}', 11288), ('\u{2c49}', 11289), ('\u{2c4a}', 11290), ('\u{2c4b}', 11291),
+ ('\u{2c4c}', 11292), ('\u{2c4d}', 11293), ('\u{2c4e}', 11294), ('\u{2c4f}', 11295),
+ ('\u{2c50}', 11296), ('\u{2c51}', 11297), ('\u{2c52}', 11298), ('\u{2c53}', 11299),
+ ('\u{2c54}', 11300), ('\u{2c55}', 11301), ('\u{2c56}', 11302), ('\u{2c57}', 11303),
+ ('\u{2c58}', 11304), ('\u{2c59}', 11305), ('\u{2c5a}', 11306), ('\u{2c5b}', 11307),
+ ('\u{2c5c}', 11308), ('\u{2c5d}', 11309), ('\u{2c5e}', 11310), ('\u{2c5f}', 11311),
+ ('\u{2c61}', 11360), ('\u{2c65}', 570), ('\u{2c66}', 574), ('\u{2c68}', 11367),
+ ('\u{2c6a}', 11369), ('\u{2c6c}', 11371), ('\u{2c73}', 11378), ('\u{2c76}', 11381),
+ ('\u{2c81}', 11392), ('\u{2c83}', 11394), ('\u{2c85}', 11396), ('\u{2c87}', 11398),
+ ('\u{2c89}', 11400), ('\u{2c8b}', 11402), ('\u{2c8d}', 11404), ('\u{2c8f}', 11406),
+ ('\u{2c91}', 11408), ('\u{2c93}', 11410), ('\u{2c95}', 11412), ('\u{2c97}', 11414),
+ ('\u{2c99}', 11416), ('\u{2c9b}', 11418), ('\u{2c9d}', 11420), ('\u{2c9f}', 11422),
+ ('\u{2ca1}', 11424), ('\u{2ca3}', 11426), ('\u{2ca5}', 11428), ('\u{2ca7}', 11430),
+ ('\u{2ca9}', 11432), ('\u{2cab}', 11434), ('\u{2cad}', 11436), ('\u{2caf}', 11438),
+ ('\u{2cb1}', 11440), ('\u{2cb3}', 11442), ('\u{2cb5}', 11444), ('\u{2cb7}', 11446),
+ ('\u{2cb9}', 11448), ('\u{2cbb}', 11450), ('\u{2cbd}', 11452), ('\u{2cbf}', 11454),
+ ('\u{2cc1}', 11456), ('\u{2cc3}', 11458), ('\u{2cc5}', 11460), ('\u{2cc7}', 11462),
+ ('\u{2cc9}', 11464), ('\u{2ccb}', 11466), ('\u{2ccd}', 11468), ('\u{2ccf}', 11470),
+ ('\u{2cd1}', 11472), ('\u{2cd3}', 11474), ('\u{2cd5}', 11476), ('\u{2cd7}', 11478),
+ ('\u{2cd9}', 11480), ('\u{2cdb}', 11482), ('\u{2cdd}', 11484), ('\u{2cdf}', 11486),
+ ('\u{2ce1}', 11488), ('\u{2ce3}', 11490), ('\u{2cec}', 11499), ('\u{2cee}', 11501),
+ ('\u{2cf3}', 11506), ('\u{2d00}', 4256), ('\u{2d01}', 4257), ('\u{2d02}', 4258),
+ ('\u{2d03}', 4259), ('\u{2d04}', 4260), ('\u{2d05}', 4261), ('\u{2d06}', 4262),
+ ('\u{2d07}', 4263), ('\u{2d08}', 4264), ('\u{2d09}', 4265), ('\u{2d0a}', 4266),
+ ('\u{2d0b}', 4267), ('\u{2d0c}', 4268), ('\u{2d0d}', 4269), ('\u{2d0e}', 4270),
+ ('\u{2d0f}', 4271), ('\u{2d10}', 4272), ('\u{2d11}', 4273), ('\u{2d12}', 4274),
+ ('\u{2d13}', 4275), ('\u{2d14}', 4276), ('\u{2d15}', 4277), ('\u{2d16}', 4278),
+ ('\u{2d17}', 4279), ('\u{2d18}', 4280), ('\u{2d19}', 4281), ('\u{2d1a}', 4282),
+ ('\u{2d1b}', 4283), ('\u{2d1c}', 4284), ('\u{2d1d}', 4285), ('\u{2d1e}', 4286),
+ ('\u{2d1f}', 4287), ('\u{2d20}', 4288), ('\u{2d21}', 4289), ('\u{2d22}', 4290),
+ ('\u{2d23}', 4291), ('\u{2d24}', 4292), ('\u{2d25}', 4293), ('\u{2d27}', 4295),
+ ('\u{2d2d}', 4301), ('\u{a641}', 42560), ('\u{a643}', 42562), ('\u{a645}', 42564),
+ ('\u{a647}', 42566), ('\u{a649}', 42568), ('\u{a64b}', 42570), ('\u{a64d}', 42572),
+ ('\u{a64f}', 42574), ('\u{a651}', 42576), ('\u{a653}', 42578), ('\u{a655}', 42580),
+ ('\u{a657}', 42582), ('\u{a659}', 42584), ('\u{a65b}', 42586), ('\u{a65d}', 42588),
+ ('\u{a65f}', 42590), ('\u{a661}', 42592), ('\u{a663}', 42594), ('\u{a665}', 42596),
+ ('\u{a667}', 42598), ('\u{a669}', 42600), ('\u{a66b}', 42602), ('\u{a66d}', 42604),
+ ('\u{a681}', 42624), ('\u{a683}', 42626), ('\u{a685}', 42628), ('\u{a687}', 42630),
+ ('\u{a689}', 42632), ('\u{a68b}', 42634), ('\u{a68d}', 42636), ('\u{a68f}', 42638),
+ ('\u{a691}', 42640), ('\u{a693}', 42642), ('\u{a695}', 42644), ('\u{a697}', 42646),
+ ('\u{a699}', 42648), ('\u{a69b}', 42650), ('\u{a723}', 42786), ('\u{a725}', 42788),
+ ('\u{a727}', 42790), ('\u{a729}', 42792), ('\u{a72b}', 42794), ('\u{a72d}', 42796),
+ ('\u{a72f}', 42798), ('\u{a733}', 42802), ('\u{a735}', 42804), ('\u{a737}', 42806),
+ ('\u{a739}', 42808), ('\u{a73b}', 42810), ('\u{a73d}', 42812), ('\u{a73f}', 42814),
+ ('\u{a741}', 42816), ('\u{a743}', 42818), ('\u{a745}', 42820), ('\u{a747}', 42822),
+ ('\u{a749}', 42824), ('\u{a74b}', 42826), ('\u{a74d}', 42828), ('\u{a74f}', 42830),
+ ('\u{a751}', 42832), ('\u{a753}', 42834), ('\u{a755}', 42836), ('\u{a757}', 42838),
+ ('\u{a759}', 42840), ('\u{a75b}', 42842), ('\u{a75d}', 42844), ('\u{a75f}', 42846),
+ ('\u{a761}', 42848), ('\u{a763}', 42850), ('\u{a765}', 42852), ('\u{a767}', 42854),
+ ('\u{a769}', 42856), ('\u{a76b}', 42858), ('\u{a76d}', 42860), ('\u{a76f}', 42862),
+ ('\u{a77a}', 42873), ('\u{a77c}', 42875), ('\u{a77f}', 42878), ('\u{a781}', 42880),
+ ('\u{a783}', 42882), ('\u{a785}', 42884), ('\u{a787}', 42886), ('\u{a78c}', 42891),
+ ('\u{a791}', 42896), ('\u{a793}', 42898), ('\u{a794}', 42948), ('\u{a797}', 42902),
+ ('\u{a799}', 42904), ('\u{a79b}', 42906), ('\u{a79d}', 42908), ('\u{a79f}', 42910),
+ ('\u{a7a1}', 42912), ('\u{a7a3}', 42914), ('\u{a7a5}', 42916), ('\u{a7a7}', 42918),
+ ('\u{a7a9}', 42920), ('\u{a7b5}', 42932), ('\u{a7b7}', 42934), ('\u{a7b9}', 42936),
+ ('\u{a7bb}', 42938), ('\u{a7bd}', 42940), ('\u{a7bf}', 42942), ('\u{a7c1}', 42944),
+ ('\u{a7c3}', 42946), ('\u{a7c8}', 42951), ('\u{a7ca}', 42953), ('\u{a7d1}', 42960),
+ ('\u{a7d7}', 42966), ('\u{a7d9}', 42968), ('\u{a7f6}', 42997), ('\u{ab53}', 42931),
+ ('\u{ab70}', 5024), ('\u{ab71}', 5025), ('\u{ab72}', 5026), ('\u{ab73}', 5027),
+ ('\u{ab74}', 5028), ('\u{ab75}', 5029), ('\u{ab76}', 5030), ('\u{ab77}', 5031),
+ ('\u{ab78}', 5032), ('\u{ab79}', 5033), ('\u{ab7a}', 5034), ('\u{ab7b}', 5035),
+ ('\u{ab7c}', 5036), ('\u{ab7d}', 5037), ('\u{ab7e}', 5038), ('\u{ab7f}', 5039),
+ ('\u{ab80}', 5040), ('\u{ab81}', 5041), ('\u{ab82}', 5042), ('\u{ab83}', 5043),
+ ('\u{ab84}', 5044), ('\u{ab85}', 5045), ('\u{ab86}', 5046), ('\u{ab87}', 5047),
+ ('\u{ab88}', 5048), ('\u{ab89}', 5049), ('\u{ab8a}', 5050), ('\u{ab8b}', 5051),
+ ('\u{ab8c}', 5052), ('\u{ab8d}', 5053), ('\u{ab8e}', 5054), ('\u{ab8f}', 5055),
+ ('\u{ab90}', 5056), ('\u{ab91}', 5057), ('\u{ab92}', 5058), ('\u{ab93}', 5059),
+ ('\u{ab94}', 5060), ('\u{ab95}', 5061), ('\u{ab96}', 5062), ('\u{ab97}', 5063),
+ ('\u{ab98}', 5064), ('\u{ab99}', 5065), ('\u{ab9a}', 5066), ('\u{ab9b}', 5067),
+ ('\u{ab9c}', 5068), ('\u{ab9d}', 5069), ('\u{ab9e}', 5070), ('\u{ab9f}', 5071),
+ ('\u{aba0}', 5072), ('\u{aba1}', 5073), ('\u{aba2}', 5074), ('\u{aba3}', 5075),
+ ('\u{aba4}', 5076), ('\u{aba5}', 5077), ('\u{aba6}', 5078), ('\u{aba7}', 5079),
+ ('\u{aba8}', 5080), ('\u{aba9}', 5081), ('\u{abaa}', 5082), ('\u{abab}', 5083),
+ ('\u{abac}', 5084), ('\u{abad}', 5085), ('\u{abae}', 5086), ('\u{abaf}', 5087),
+ ('\u{abb0}', 5088), ('\u{abb1}', 5089), ('\u{abb2}', 5090), ('\u{abb3}', 5091),
+ ('\u{abb4}', 5092), ('\u{abb5}', 5093), ('\u{abb6}', 5094), ('\u{abb7}', 5095),
+ ('\u{abb8}', 5096), ('\u{abb9}', 5097), ('\u{abba}', 5098), ('\u{abbb}', 5099),
+ ('\u{abbc}', 5100), ('\u{abbd}', 5101), ('\u{abbe}', 5102), ('\u{abbf}', 5103),
+ ('\u{fb00}', 4194394), ('\u{fb01}', 4194395), ('\u{fb02}', 4194396), ('\u{fb03}', 4194397),
+ ('\u{fb04}', 4194398), ('\u{fb05}', 4194399), ('\u{fb06}', 4194400), ('\u{fb13}', 4194401),
+ ('\u{fb14}', 4194402), ('\u{fb15}', 4194403), ('\u{fb16}', 4194404), ('\u{fb17}', 4194405),
+ ('\u{ff41}', 65313), ('\u{ff42}', 65314), ('\u{ff43}', 65315), ('\u{ff44}', 65316),
+ ('\u{ff45}', 65317), ('\u{ff46}', 65318), ('\u{ff47}', 65319), ('\u{ff48}', 65320),
+ ('\u{ff49}', 65321), ('\u{ff4a}', 65322), ('\u{ff4b}', 65323), ('\u{ff4c}', 65324),
+ ('\u{ff4d}', 65325), ('\u{ff4e}', 65326), ('\u{ff4f}', 65327), ('\u{ff50}', 65328),
+ ('\u{ff51}', 65329), ('\u{ff52}', 65330), ('\u{ff53}', 65331), ('\u{ff54}', 65332),
+ ('\u{ff55}', 65333), ('\u{ff56}', 65334), ('\u{ff57}', 65335), ('\u{ff58}', 65336),
+ ('\u{ff59}', 65337), ('\u{ff5a}', 65338), ('\u{10428}', 66560), ('\u{10429}', 66561),
+ ('\u{1042a}', 66562), ('\u{1042b}', 66563), ('\u{1042c}', 66564), ('\u{1042d}', 66565),
+ ('\u{1042e}', 66566), ('\u{1042f}', 66567), ('\u{10430}', 66568), ('\u{10431}', 66569),
+ ('\u{10432}', 66570), ('\u{10433}', 66571), ('\u{10434}', 66572), ('\u{10435}', 66573),
+ ('\u{10436}', 66574), ('\u{10437}', 66575), ('\u{10438}', 66576), ('\u{10439}', 66577),
+ ('\u{1043a}', 66578), ('\u{1043b}', 66579), ('\u{1043c}', 66580), ('\u{1043d}', 66581),
+ ('\u{1043e}', 66582), ('\u{1043f}', 66583), ('\u{10440}', 66584), ('\u{10441}', 66585),
+ ('\u{10442}', 66586), ('\u{10443}', 66587), ('\u{10444}', 66588), ('\u{10445}', 66589),
+ ('\u{10446}', 66590), ('\u{10447}', 66591), ('\u{10448}', 66592), ('\u{10449}', 66593),
+ ('\u{1044a}', 66594), ('\u{1044b}', 66595), ('\u{1044c}', 66596), ('\u{1044d}', 66597),
+ ('\u{1044e}', 66598), ('\u{1044f}', 66599), ('\u{104d8}', 66736), ('\u{104d9}', 66737),
+ ('\u{104da}', 66738), ('\u{104db}', 66739), ('\u{104dc}', 66740), ('\u{104dd}', 66741),
+ ('\u{104de}', 66742), ('\u{104df}', 66743), ('\u{104e0}', 66744), ('\u{104e1}', 66745),
+ ('\u{104e2}', 66746), ('\u{104e3}', 66747), ('\u{104e4}', 66748), ('\u{104e5}', 66749),
+ ('\u{104e6}', 66750), ('\u{104e7}', 66751), ('\u{104e8}', 66752), ('\u{104e9}', 66753),
+ ('\u{104ea}', 66754), ('\u{104eb}', 66755), ('\u{104ec}', 66756), ('\u{104ed}', 66757),
+ ('\u{104ee}', 66758), ('\u{104ef}', 66759), ('\u{104f0}', 66760), ('\u{104f1}', 66761),
+ ('\u{104f2}', 66762), ('\u{104f3}', 66763), ('\u{104f4}', 66764), ('\u{104f5}', 66765),
+ ('\u{104f6}', 66766), ('\u{104f7}', 66767), ('\u{104f8}', 66768), ('\u{104f9}', 66769),
+ ('\u{104fa}', 66770), ('\u{104fb}', 66771), ('\u{10597}', 66928), ('\u{10598}', 66929),
+ ('\u{10599}', 66930), ('\u{1059a}', 66931), ('\u{1059b}', 66932), ('\u{1059c}', 66933),
+ ('\u{1059d}', 66934), ('\u{1059e}', 66935), ('\u{1059f}', 66936), ('\u{105a0}', 66937),
+ ('\u{105a1}', 66938), ('\u{105a3}', 66940), ('\u{105a4}', 66941), ('\u{105a5}', 66942),
+ ('\u{105a6}', 66943), ('\u{105a7}', 66944), ('\u{105a8}', 66945), ('\u{105a9}', 66946),
+ ('\u{105aa}', 66947), ('\u{105ab}', 66948), ('\u{105ac}', 66949), ('\u{105ad}', 66950),
+ ('\u{105ae}', 66951), ('\u{105af}', 66952), ('\u{105b0}', 66953), ('\u{105b1}', 66954),
+ ('\u{105b3}', 66956), ('\u{105b4}', 66957), ('\u{105b5}', 66958), ('\u{105b6}', 66959),
+ ('\u{105b7}', 66960), ('\u{105b8}', 66961), ('\u{105b9}', 66962), ('\u{105bb}', 66964),
+ ('\u{105bc}', 66965), ('\u{10cc0}', 68736), ('\u{10cc1}', 68737), ('\u{10cc2}', 68738),
+ ('\u{10cc3}', 68739), ('\u{10cc4}', 68740), ('\u{10cc5}', 68741), ('\u{10cc6}', 68742),
+ ('\u{10cc7}', 68743), ('\u{10cc8}', 68744), ('\u{10cc9}', 68745), ('\u{10cca}', 68746),
+ ('\u{10ccb}', 68747), ('\u{10ccc}', 68748), ('\u{10ccd}', 68749), ('\u{10cce}', 68750),
+ ('\u{10ccf}', 68751), ('\u{10cd0}', 68752), ('\u{10cd1}', 68753), ('\u{10cd2}', 68754),
+ ('\u{10cd3}', 68755), ('\u{10cd4}', 68756), ('\u{10cd5}', 68757), ('\u{10cd6}', 68758),
+ ('\u{10cd7}', 68759), ('\u{10cd8}', 68760), ('\u{10cd9}', 68761), ('\u{10cda}', 68762),
+ ('\u{10cdb}', 68763), ('\u{10cdc}', 68764), ('\u{10cdd}', 68765), ('\u{10cde}', 68766),
+ ('\u{10cdf}', 68767), ('\u{10ce0}', 68768), ('\u{10ce1}', 68769), ('\u{10ce2}', 68770),
+ ('\u{10ce3}', 68771), ('\u{10ce4}', 68772), ('\u{10ce5}', 68773), ('\u{10ce6}', 68774),
+ ('\u{10ce7}', 68775), ('\u{10ce8}', 68776), ('\u{10ce9}', 68777), ('\u{10cea}', 68778),
+ ('\u{10ceb}', 68779), ('\u{10cec}', 68780), ('\u{10ced}', 68781), ('\u{10cee}', 68782),
+ ('\u{10cef}', 68783), ('\u{10cf0}', 68784), ('\u{10cf1}', 68785), ('\u{10cf2}', 68786),
+ ('\u{118c0}', 71840), ('\u{118c1}', 71841), ('\u{118c2}', 71842), ('\u{118c3}', 71843),
+ ('\u{118c4}', 71844), ('\u{118c5}', 71845), ('\u{118c6}', 71846), ('\u{118c7}', 71847),
+ ('\u{118c8}', 71848), ('\u{118c9}', 71849), ('\u{118ca}', 71850), ('\u{118cb}', 71851),
+ ('\u{118cc}', 71852), ('\u{118cd}', 71853), ('\u{118ce}', 71854), ('\u{118cf}', 71855),
+ ('\u{118d0}', 71856), ('\u{118d1}', 71857), ('\u{118d2}', 71858), ('\u{118d3}', 71859),
+ ('\u{118d4}', 71860), ('\u{118d5}', 71861), ('\u{118d6}', 71862), ('\u{118d7}', 71863),
+ ('\u{118d8}', 71864), ('\u{118d9}', 71865), ('\u{118da}', 71866), ('\u{118db}', 71867),
+ ('\u{118dc}', 71868), ('\u{118dd}', 71869), ('\u{118de}', 71870), ('\u{118df}', 71871),
+ ('\u{16e60}', 93760), ('\u{16e61}', 93761), ('\u{16e62}', 93762), ('\u{16e63}', 93763),
+ ('\u{16e64}', 93764), ('\u{16e65}', 93765), ('\u{16e66}', 93766), ('\u{16e67}', 93767),
+ ('\u{16e68}', 93768), ('\u{16e69}', 93769), ('\u{16e6a}', 93770), ('\u{16e6b}', 93771),
+ ('\u{16e6c}', 93772), ('\u{16e6d}', 93773), ('\u{16e6e}', 93774), ('\u{16e6f}', 93775),
+ ('\u{16e70}', 93776), ('\u{16e71}', 93777), ('\u{16e72}', 93778), ('\u{16e73}', 93779),
+ ('\u{16e74}', 93780), ('\u{16e75}', 93781), ('\u{16e76}', 93782), ('\u{16e77}', 93783),
+ ('\u{16e78}', 93784), ('\u{16e79}', 93785), ('\u{16e7a}', 93786), ('\u{16e7b}', 93787),
+ ('\u{16e7c}', 93788), ('\u{16e7d}', 93789), ('\u{16e7e}', 93790), ('\u{16e7f}', 93791),
+ ('\u{1e922}', 125184), ('\u{1e923}', 125185), ('\u{1e924}', 125186), ('\u{1e925}', 125187),
+ ('\u{1e926}', 125188), ('\u{1e927}', 125189), ('\u{1e928}', 125190), ('\u{1e929}', 125191),
+ ('\u{1e92a}', 125192), ('\u{1e92b}', 125193), ('\u{1e92c}', 125194), ('\u{1e92d}', 125195),
+ ('\u{1e92e}', 125196), ('\u{1e92f}', 125197), ('\u{1e930}', 125198), ('\u{1e931}', 125199),
+ ('\u{1e932}', 125200), ('\u{1e933}', 125201), ('\u{1e934}', 125202), ('\u{1e935}', 125203),
+ ('\u{1e936}', 125204), ('\u{1e937}', 125205), ('\u{1e938}', 125206), ('\u{1e939}', 125207),
+ ('\u{1e93a}', 125208), ('\u{1e93b}', 125209), ('\u{1e93c}', 125210), ('\u{1e93d}', 125211),
+ ('\u{1e93e}', 125212), ('\u{1e93f}', 125213), ('\u{1e940}', 125214), ('\u{1e941}', 125215),
+ ('\u{1e942}', 125216), ('\u{1e943}', 125217),
];
- static UPPERCASE_TABLE: &[(char, [char; 3])] = &[
- ('a', ['A', '\u{0}', '\u{0}']), ('b', ['B', '\u{0}', '\u{0}']),
- ('c', ['C', '\u{0}', '\u{0}']), ('d', ['D', '\u{0}', '\u{0}']),
- ('e', ['E', '\u{0}', '\u{0}']), ('f', ['F', '\u{0}', '\u{0}']),
- ('g', ['G', '\u{0}', '\u{0}']), ('h', ['H', '\u{0}', '\u{0}']),
- ('i', ['I', '\u{0}', '\u{0}']), ('j', ['J', '\u{0}', '\u{0}']),
- ('k', ['K', '\u{0}', '\u{0}']), ('l', ['L', '\u{0}', '\u{0}']),
- ('m', ['M', '\u{0}', '\u{0}']), ('n', ['N', '\u{0}', '\u{0}']),
- ('o', ['O', '\u{0}', '\u{0}']), ('p', ['P', '\u{0}', '\u{0}']),
- ('q', ['Q', '\u{0}', '\u{0}']), ('r', ['R', '\u{0}', '\u{0}']),
- ('s', ['S', '\u{0}', '\u{0}']), ('t', ['T', '\u{0}', '\u{0}']),
- ('u', ['U', '\u{0}', '\u{0}']), ('v', ['V', '\u{0}', '\u{0}']),
- ('w', ['W', '\u{0}', '\u{0}']), ('x', ['X', '\u{0}', '\u{0}']),
- ('y', ['Y', '\u{0}', '\u{0}']), ('z', ['Z', '\u{0}', '\u{0}']),
- ('\u{b5}', ['\u{39c}', '\u{0}', '\u{0}']), ('\u{df}', ['S', 'S', '\u{0}']),
- ('\u{e0}', ['\u{c0}', '\u{0}', '\u{0}']), ('\u{e1}', ['\u{c1}', '\u{0}', '\u{0}']),
- ('\u{e2}', ['\u{c2}', '\u{0}', '\u{0}']), ('\u{e3}', ['\u{c3}', '\u{0}', '\u{0}']),
- ('\u{e4}', ['\u{c4}', '\u{0}', '\u{0}']), ('\u{e5}', ['\u{c5}', '\u{0}', '\u{0}']),
- ('\u{e6}', ['\u{c6}', '\u{0}', '\u{0}']), ('\u{e7}', ['\u{c7}', '\u{0}', '\u{0}']),
- ('\u{e8}', ['\u{c8}', '\u{0}', '\u{0}']), ('\u{e9}', ['\u{c9}', '\u{0}', '\u{0}']),
- ('\u{ea}', ['\u{ca}', '\u{0}', '\u{0}']), ('\u{eb}', ['\u{cb}', '\u{0}', '\u{0}']),
- ('\u{ec}', ['\u{cc}', '\u{0}', '\u{0}']), ('\u{ed}', ['\u{cd}', '\u{0}', '\u{0}']),
- ('\u{ee}', ['\u{ce}', '\u{0}', '\u{0}']), ('\u{ef}', ['\u{cf}', '\u{0}', '\u{0}']),
- ('\u{f0}', ['\u{d0}', '\u{0}', '\u{0}']), ('\u{f1}', ['\u{d1}', '\u{0}', '\u{0}']),
- ('\u{f2}', ['\u{d2}', '\u{0}', '\u{0}']), ('\u{f3}', ['\u{d3}', '\u{0}', '\u{0}']),
- ('\u{f4}', ['\u{d4}', '\u{0}', '\u{0}']), ('\u{f5}', ['\u{d5}', '\u{0}', '\u{0}']),
- ('\u{f6}', ['\u{d6}', '\u{0}', '\u{0}']), ('\u{f8}', ['\u{d8}', '\u{0}', '\u{0}']),
- ('\u{f9}', ['\u{d9}', '\u{0}', '\u{0}']), ('\u{fa}', ['\u{da}', '\u{0}', '\u{0}']),
- ('\u{fb}', ['\u{db}', '\u{0}', '\u{0}']), ('\u{fc}', ['\u{dc}', '\u{0}', '\u{0}']),
- ('\u{fd}', ['\u{dd}', '\u{0}', '\u{0}']), ('\u{fe}', ['\u{de}', '\u{0}', '\u{0}']),
- ('\u{ff}', ['\u{178}', '\u{0}', '\u{0}']), ('\u{101}', ['\u{100}', '\u{0}', '\u{0}']),
- ('\u{103}', ['\u{102}', '\u{0}', '\u{0}']), ('\u{105}', ['\u{104}', '\u{0}', '\u{0}']),
- ('\u{107}', ['\u{106}', '\u{0}', '\u{0}']), ('\u{109}', ['\u{108}', '\u{0}', '\u{0}']),
- ('\u{10b}', ['\u{10a}', '\u{0}', '\u{0}']), ('\u{10d}', ['\u{10c}', '\u{0}', '\u{0}']),
- ('\u{10f}', ['\u{10e}', '\u{0}', '\u{0}']), ('\u{111}', ['\u{110}', '\u{0}', '\u{0}']),
- ('\u{113}', ['\u{112}', '\u{0}', '\u{0}']), ('\u{115}', ['\u{114}', '\u{0}', '\u{0}']),
- ('\u{117}', ['\u{116}', '\u{0}', '\u{0}']), ('\u{119}', ['\u{118}', '\u{0}', '\u{0}']),
- ('\u{11b}', ['\u{11a}', '\u{0}', '\u{0}']), ('\u{11d}', ['\u{11c}', '\u{0}', '\u{0}']),
- ('\u{11f}', ['\u{11e}', '\u{0}', '\u{0}']), ('\u{121}', ['\u{120}', '\u{0}', '\u{0}']),
- ('\u{123}', ['\u{122}', '\u{0}', '\u{0}']), ('\u{125}', ['\u{124}', '\u{0}', '\u{0}']),
- ('\u{127}', ['\u{126}', '\u{0}', '\u{0}']), ('\u{129}', ['\u{128}', '\u{0}', '\u{0}']),
- ('\u{12b}', ['\u{12a}', '\u{0}', '\u{0}']), ('\u{12d}', ['\u{12c}', '\u{0}', '\u{0}']),
- ('\u{12f}', ['\u{12e}', '\u{0}', '\u{0}']), ('\u{131}', ['I', '\u{0}', '\u{0}']),
- ('\u{133}', ['\u{132}', '\u{0}', '\u{0}']), ('\u{135}', ['\u{134}', '\u{0}', '\u{0}']),
- ('\u{137}', ['\u{136}', '\u{0}', '\u{0}']), ('\u{13a}', ['\u{139}', '\u{0}', '\u{0}']),
- ('\u{13c}', ['\u{13b}', '\u{0}', '\u{0}']), ('\u{13e}', ['\u{13d}', '\u{0}', '\u{0}']),
- ('\u{140}', ['\u{13f}', '\u{0}', '\u{0}']), ('\u{142}', ['\u{141}', '\u{0}', '\u{0}']),
- ('\u{144}', ['\u{143}', '\u{0}', '\u{0}']), ('\u{146}', ['\u{145}', '\u{0}', '\u{0}']),
- ('\u{148}', ['\u{147}', '\u{0}', '\u{0}']), ('\u{149}', ['\u{2bc}', 'N', '\u{0}']),
- ('\u{14b}', ['\u{14a}', '\u{0}', '\u{0}']), ('\u{14d}', ['\u{14c}', '\u{0}', '\u{0}']),
- ('\u{14f}', ['\u{14e}', '\u{0}', '\u{0}']), ('\u{151}', ['\u{150}', '\u{0}', '\u{0}']),
- ('\u{153}', ['\u{152}', '\u{0}', '\u{0}']), ('\u{155}', ['\u{154}', '\u{0}', '\u{0}']),
- ('\u{157}', ['\u{156}', '\u{0}', '\u{0}']), ('\u{159}', ['\u{158}', '\u{0}', '\u{0}']),
- ('\u{15b}', ['\u{15a}', '\u{0}', '\u{0}']), ('\u{15d}', ['\u{15c}', '\u{0}', '\u{0}']),
- ('\u{15f}', ['\u{15e}', '\u{0}', '\u{0}']), ('\u{161}', ['\u{160}', '\u{0}', '\u{0}']),
- ('\u{163}', ['\u{162}', '\u{0}', '\u{0}']), ('\u{165}', ['\u{164}', '\u{0}', '\u{0}']),
- ('\u{167}', ['\u{166}', '\u{0}', '\u{0}']), ('\u{169}', ['\u{168}', '\u{0}', '\u{0}']),
- ('\u{16b}', ['\u{16a}', '\u{0}', '\u{0}']), ('\u{16d}', ['\u{16c}', '\u{0}', '\u{0}']),
- ('\u{16f}', ['\u{16e}', '\u{0}', '\u{0}']), ('\u{171}', ['\u{170}', '\u{0}', '\u{0}']),
- ('\u{173}', ['\u{172}', '\u{0}', '\u{0}']), ('\u{175}', ['\u{174}', '\u{0}', '\u{0}']),
- ('\u{177}', ['\u{176}', '\u{0}', '\u{0}']), ('\u{17a}', ['\u{179}', '\u{0}', '\u{0}']),
- ('\u{17c}', ['\u{17b}', '\u{0}', '\u{0}']), ('\u{17e}', ['\u{17d}', '\u{0}', '\u{0}']),
- ('\u{17f}', ['S', '\u{0}', '\u{0}']), ('\u{180}', ['\u{243}', '\u{0}', '\u{0}']),
- ('\u{183}', ['\u{182}', '\u{0}', '\u{0}']), ('\u{185}', ['\u{184}', '\u{0}', '\u{0}']),
- ('\u{188}', ['\u{187}', '\u{0}', '\u{0}']), ('\u{18c}', ['\u{18b}', '\u{0}', '\u{0}']),
- ('\u{192}', ['\u{191}', '\u{0}', '\u{0}']), ('\u{195}', ['\u{1f6}', '\u{0}', '\u{0}']),
- ('\u{199}', ['\u{198}', '\u{0}', '\u{0}']), ('\u{19a}', ['\u{23d}', '\u{0}', '\u{0}']),
- ('\u{19e}', ['\u{220}', '\u{0}', '\u{0}']), ('\u{1a1}', ['\u{1a0}', '\u{0}', '\u{0}']),
- ('\u{1a3}', ['\u{1a2}', '\u{0}', '\u{0}']), ('\u{1a5}', ['\u{1a4}', '\u{0}', '\u{0}']),
- ('\u{1a8}', ['\u{1a7}', '\u{0}', '\u{0}']), ('\u{1ad}', ['\u{1ac}', '\u{0}', '\u{0}']),
- ('\u{1b0}', ['\u{1af}', '\u{0}', '\u{0}']), ('\u{1b4}', ['\u{1b3}', '\u{0}', '\u{0}']),
- ('\u{1b6}', ['\u{1b5}', '\u{0}', '\u{0}']), ('\u{1b9}', ['\u{1b8}', '\u{0}', '\u{0}']),
- ('\u{1bd}', ['\u{1bc}', '\u{0}', '\u{0}']), ('\u{1bf}', ['\u{1f7}', '\u{0}', '\u{0}']),
- ('\u{1c5}', ['\u{1c4}', '\u{0}', '\u{0}']), ('\u{1c6}', ['\u{1c4}', '\u{0}', '\u{0}']),
- ('\u{1c8}', ['\u{1c7}', '\u{0}', '\u{0}']), ('\u{1c9}', ['\u{1c7}', '\u{0}', '\u{0}']),
- ('\u{1cb}', ['\u{1ca}', '\u{0}', '\u{0}']), ('\u{1cc}', ['\u{1ca}', '\u{0}', '\u{0}']),
- ('\u{1ce}', ['\u{1cd}', '\u{0}', '\u{0}']), ('\u{1d0}', ['\u{1cf}', '\u{0}', '\u{0}']),
- ('\u{1d2}', ['\u{1d1}', '\u{0}', '\u{0}']), ('\u{1d4}', ['\u{1d3}', '\u{0}', '\u{0}']),
- ('\u{1d6}', ['\u{1d5}', '\u{0}', '\u{0}']), ('\u{1d8}', ['\u{1d7}', '\u{0}', '\u{0}']),
- ('\u{1da}', ['\u{1d9}', '\u{0}', '\u{0}']), ('\u{1dc}', ['\u{1db}', '\u{0}', '\u{0}']),
- ('\u{1dd}', ['\u{18e}', '\u{0}', '\u{0}']), ('\u{1df}', ['\u{1de}', '\u{0}', '\u{0}']),
- ('\u{1e1}', ['\u{1e0}', '\u{0}', '\u{0}']), ('\u{1e3}', ['\u{1e2}', '\u{0}', '\u{0}']),
- ('\u{1e5}', ['\u{1e4}', '\u{0}', '\u{0}']), ('\u{1e7}', ['\u{1e6}', '\u{0}', '\u{0}']),
- ('\u{1e9}', ['\u{1e8}', '\u{0}', '\u{0}']), ('\u{1eb}', ['\u{1ea}', '\u{0}', '\u{0}']),
- ('\u{1ed}', ['\u{1ec}', '\u{0}', '\u{0}']), ('\u{1ef}', ['\u{1ee}', '\u{0}', '\u{0}']),
- ('\u{1f0}', ['J', '\u{30c}', '\u{0}']), ('\u{1f2}', ['\u{1f1}', '\u{0}', '\u{0}']),
- ('\u{1f3}', ['\u{1f1}', '\u{0}', '\u{0}']), ('\u{1f5}', ['\u{1f4}', '\u{0}', '\u{0}']),
- ('\u{1f9}', ['\u{1f8}', '\u{0}', '\u{0}']), ('\u{1fb}', ['\u{1fa}', '\u{0}', '\u{0}']),
- ('\u{1fd}', ['\u{1fc}', '\u{0}', '\u{0}']), ('\u{1ff}', ['\u{1fe}', '\u{0}', '\u{0}']),
- ('\u{201}', ['\u{200}', '\u{0}', '\u{0}']), ('\u{203}', ['\u{202}', '\u{0}', '\u{0}']),
- ('\u{205}', ['\u{204}', '\u{0}', '\u{0}']), ('\u{207}', ['\u{206}', '\u{0}', '\u{0}']),
- ('\u{209}', ['\u{208}', '\u{0}', '\u{0}']), ('\u{20b}', ['\u{20a}', '\u{0}', '\u{0}']),
- ('\u{20d}', ['\u{20c}', '\u{0}', '\u{0}']), ('\u{20f}', ['\u{20e}', '\u{0}', '\u{0}']),
- ('\u{211}', ['\u{210}', '\u{0}', '\u{0}']), ('\u{213}', ['\u{212}', '\u{0}', '\u{0}']),
- ('\u{215}', ['\u{214}', '\u{0}', '\u{0}']), ('\u{217}', ['\u{216}', '\u{0}', '\u{0}']),
- ('\u{219}', ['\u{218}', '\u{0}', '\u{0}']), ('\u{21b}', ['\u{21a}', '\u{0}', '\u{0}']),
- ('\u{21d}', ['\u{21c}', '\u{0}', '\u{0}']), ('\u{21f}', ['\u{21e}', '\u{0}', '\u{0}']),
- ('\u{223}', ['\u{222}', '\u{0}', '\u{0}']), ('\u{225}', ['\u{224}', '\u{0}', '\u{0}']),
- ('\u{227}', ['\u{226}', '\u{0}', '\u{0}']), ('\u{229}', ['\u{228}', '\u{0}', '\u{0}']),
- ('\u{22b}', ['\u{22a}', '\u{0}', '\u{0}']), ('\u{22d}', ['\u{22c}', '\u{0}', '\u{0}']),
- ('\u{22f}', ['\u{22e}', '\u{0}', '\u{0}']), ('\u{231}', ['\u{230}', '\u{0}', '\u{0}']),
- ('\u{233}', ['\u{232}', '\u{0}', '\u{0}']), ('\u{23c}', ['\u{23b}', '\u{0}', '\u{0}']),
- ('\u{23f}', ['\u{2c7e}', '\u{0}', '\u{0}']), ('\u{240}', ['\u{2c7f}', '\u{0}', '\u{0}']),
- ('\u{242}', ['\u{241}', '\u{0}', '\u{0}']), ('\u{247}', ['\u{246}', '\u{0}', '\u{0}']),
- ('\u{249}', ['\u{248}', '\u{0}', '\u{0}']), ('\u{24b}', ['\u{24a}', '\u{0}', '\u{0}']),
- ('\u{24d}', ['\u{24c}', '\u{0}', '\u{0}']), ('\u{24f}', ['\u{24e}', '\u{0}', '\u{0}']),
- ('\u{250}', ['\u{2c6f}', '\u{0}', '\u{0}']), ('\u{251}', ['\u{2c6d}', '\u{0}', '\u{0}']),
- ('\u{252}', ['\u{2c70}', '\u{0}', '\u{0}']), ('\u{253}', ['\u{181}', '\u{0}', '\u{0}']),
- ('\u{254}', ['\u{186}', '\u{0}', '\u{0}']), ('\u{256}', ['\u{189}', '\u{0}', '\u{0}']),
- ('\u{257}', ['\u{18a}', '\u{0}', '\u{0}']), ('\u{259}', ['\u{18f}', '\u{0}', '\u{0}']),
- ('\u{25b}', ['\u{190}', '\u{0}', '\u{0}']), ('\u{25c}', ['\u{a7ab}', '\u{0}', '\u{0}']),
- ('\u{260}', ['\u{193}', '\u{0}', '\u{0}']), ('\u{261}', ['\u{a7ac}', '\u{0}', '\u{0}']),
- ('\u{263}', ['\u{194}', '\u{0}', '\u{0}']), ('\u{265}', ['\u{a78d}', '\u{0}', '\u{0}']),
- ('\u{266}', ['\u{a7aa}', '\u{0}', '\u{0}']), ('\u{268}', ['\u{197}', '\u{0}', '\u{0}']),
- ('\u{269}', ['\u{196}', '\u{0}', '\u{0}']), ('\u{26a}', ['\u{a7ae}', '\u{0}', '\u{0}']),
- ('\u{26b}', ['\u{2c62}', '\u{0}', '\u{0}']), ('\u{26c}', ['\u{a7ad}', '\u{0}', '\u{0}']),
- ('\u{26f}', ['\u{19c}', '\u{0}', '\u{0}']), ('\u{271}', ['\u{2c6e}', '\u{0}', '\u{0}']),
- ('\u{272}', ['\u{19d}', '\u{0}', '\u{0}']), ('\u{275}', ['\u{19f}', '\u{0}', '\u{0}']),
- ('\u{27d}', ['\u{2c64}', '\u{0}', '\u{0}']), ('\u{280}', ['\u{1a6}', '\u{0}', '\u{0}']),
- ('\u{282}', ['\u{a7c5}', '\u{0}', '\u{0}']), ('\u{283}', ['\u{1a9}', '\u{0}', '\u{0}']),
- ('\u{287}', ['\u{a7b1}', '\u{0}', '\u{0}']), ('\u{288}', ['\u{1ae}', '\u{0}', '\u{0}']),
- ('\u{289}', ['\u{244}', '\u{0}', '\u{0}']), ('\u{28a}', ['\u{1b1}', '\u{0}', '\u{0}']),
- ('\u{28b}', ['\u{1b2}', '\u{0}', '\u{0}']), ('\u{28c}', ['\u{245}', '\u{0}', '\u{0}']),
- ('\u{292}', ['\u{1b7}', '\u{0}', '\u{0}']), ('\u{29d}', ['\u{a7b2}', '\u{0}', '\u{0}']),
- ('\u{29e}', ['\u{a7b0}', '\u{0}', '\u{0}']), ('\u{345}', ['\u{399}', '\u{0}', '\u{0}']),
- ('\u{371}', ['\u{370}', '\u{0}', '\u{0}']), ('\u{373}', ['\u{372}', '\u{0}', '\u{0}']),
- ('\u{377}', ['\u{376}', '\u{0}', '\u{0}']), ('\u{37b}', ['\u{3fd}', '\u{0}', '\u{0}']),
- ('\u{37c}', ['\u{3fe}', '\u{0}', '\u{0}']), ('\u{37d}', ['\u{3ff}', '\u{0}', '\u{0}']),
- ('\u{390}', ['\u{399}', '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\u{0}', '\u{0}']),
- ('\u{3ad}', ['\u{388}', '\u{0}', '\u{0}']), ('\u{3ae}', ['\u{389}', '\u{0}', '\u{0}']),
- ('\u{3af}', ['\u{38a}', '\u{0}', '\u{0}']), ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']),
- ('\u{3b1}', ['\u{391}', '\u{0}', '\u{0}']), ('\u{3b2}', ['\u{392}', '\u{0}', '\u{0}']),
- ('\u{3b3}', ['\u{393}', '\u{0}', '\u{0}']), ('\u{3b4}', ['\u{394}', '\u{0}', '\u{0}']),
- ('\u{3b5}', ['\u{395}', '\u{0}', '\u{0}']), ('\u{3b6}', ['\u{396}', '\u{0}', '\u{0}']),
- ('\u{3b7}', ['\u{397}', '\u{0}', '\u{0}']), ('\u{3b8}', ['\u{398}', '\u{0}', '\u{0}']),
- ('\u{3b9}', ['\u{399}', '\u{0}', '\u{0}']), ('\u{3ba}', ['\u{39a}', '\u{0}', '\u{0}']),
- ('\u{3bb}', ['\u{39b}', '\u{0}', '\u{0}']), ('\u{3bc}', ['\u{39c}', '\u{0}', '\u{0}']),
- ('\u{3bd}', ['\u{39d}', '\u{0}', '\u{0}']), ('\u{3be}', ['\u{39e}', '\u{0}', '\u{0}']),
- ('\u{3bf}', ['\u{39f}', '\u{0}', '\u{0}']), ('\u{3c0}', ['\u{3a0}', '\u{0}', '\u{0}']),
- ('\u{3c1}', ['\u{3a1}', '\u{0}', '\u{0}']), ('\u{3c2}', ['\u{3a3}', '\u{0}', '\u{0}']),
- ('\u{3c3}', ['\u{3a3}', '\u{0}', '\u{0}']), ('\u{3c4}', ['\u{3a4}', '\u{0}', '\u{0}']),
- ('\u{3c5}', ['\u{3a5}', '\u{0}', '\u{0}']), ('\u{3c6}', ['\u{3a6}', '\u{0}', '\u{0}']),
- ('\u{3c7}', ['\u{3a7}', '\u{0}', '\u{0}']), ('\u{3c8}', ['\u{3a8}', '\u{0}', '\u{0}']),
- ('\u{3c9}', ['\u{3a9}', '\u{0}', '\u{0}']), ('\u{3ca}', ['\u{3aa}', '\u{0}', '\u{0}']),
- ('\u{3cb}', ['\u{3ab}', '\u{0}', '\u{0}']), ('\u{3cc}', ['\u{38c}', '\u{0}', '\u{0}']),
- ('\u{3cd}', ['\u{38e}', '\u{0}', '\u{0}']), ('\u{3ce}', ['\u{38f}', '\u{0}', '\u{0}']),
- ('\u{3d0}', ['\u{392}', '\u{0}', '\u{0}']), ('\u{3d1}', ['\u{398}', '\u{0}', '\u{0}']),
- ('\u{3d5}', ['\u{3a6}', '\u{0}', '\u{0}']), ('\u{3d6}', ['\u{3a0}', '\u{0}', '\u{0}']),
- ('\u{3d7}', ['\u{3cf}', '\u{0}', '\u{0}']), ('\u{3d9}', ['\u{3d8}', '\u{0}', '\u{0}']),
- ('\u{3db}', ['\u{3da}', '\u{0}', '\u{0}']), ('\u{3dd}', ['\u{3dc}', '\u{0}', '\u{0}']),
- ('\u{3df}', ['\u{3de}', '\u{0}', '\u{0}']), ('\u{3e1}', ['\u{3e0}', '\u{0}', '\u{0}']),
- ('\u{3e3}', ['\u{3e2}', '\u{0}', '\u{0}']), ('\u{3e5}', ['\u{3e4}', '\u{0}', '\u{0}']),
- ('\u{3e7}', ['\u{3e6}', '\u{0}', '\u{0}']), ('\u{3e9}', ['\u{3e8}', '\u{0}', '\u{0}']),
- ('\u{3eb}', ['\u{3ea}', '\u{0}', '\u{0}']), ('\u{3ed}', ['\u{3ec}', '\u{0}', '\u{0}']),
- ('\u{3ef}', ['\u{3ee}', '\u{0}', '\u{0}']), ('\u{3f0}', ['\u{39a}', '\u{0}', '\u{0}']),
- ('\u{3f1}', ['\u{3a1}', '\u{0}', '\u{0}']), ('\u{3f2}', ['\u{3f9}', '\u{0}', '\u{0}']),
- ('\u{3f3}', ['\u{37f}', '\u{0}', '\u{0}']), ('\u{3f5}', ['\u{395}', '\u{0}', '\u{0}']),
- ('\u{3f8}', ['\u{3f7}', '\u{0}', '\u{0}']), ('\u{3fb}', ['\u{3fa}', '\u{0}', '\u{0}']),
- ('\u{430}', ['\u{410}', '\u{0}', '\u{0}']), ('\u{431}', ['\u{411}', '\u{0}', '\u{0}']),
- ('\u{432}', ['\u{412}', '\u{0}', '\u{0}']), ('\u{433}', ['\u{413}', '\u{0}', '\u{0}']),
- ('\u{434}', ['\u{414}', '\u{0}', '\u{0}']), ('\u{435}', ['\u{415}', '\u{0}', '\u{0}']),
- ('\u{436}', ['\u{416}', '\u{0}', '\u{0}']), ('\u{437}', ['\u{417}', '\u{0}', '\u{0}']),
- ('\u{438}', ['\u{418}', '\u{0}', '\u{0}']), ('\u{439}', ['\u{419}', '\u{0}', '\u{0}']),
- ('\u{43a}', ['\u{41a}', '\u{0}', '\u{0}']), ('\u{43b}', ['\u{41b}', '\u{0}', '\u{0}']),
- ('\u{43c}', ['\u{41c}', '\u{0}', '\u{0}']), ('\u{43d}', ['\u{41d}', '\u{0}', '\u{0}']),
- ('\u{43e}', ['\u{41e}', '\u{0}', '\u{0}']), ('\u{43f}', ['\u{41f}', '\u{0}', '\u{0}']),
- ('\u{440}', ['\u{420}', '\u{0}', '\u{0}']), ('\u{441}', ['\u{421}', '\u{0}', '\u{0}']),
- ('\u{442}', ['\u{422}', '\u{0}', '\u{0}']), ('\u{443}', ['\u{423}', '\u{0}', '\u{0}']),
- ('\u{444}', ['\u{424}', '\u{0}', '\u{0}']), ('\u{445}', ['\u{425}', '\u{0}', '\u{0}']),
- ('\u{446}', ['\u{426}', '\u{0}', '\u{0}']), ('\u{447}', ['\u{427}', '\u{0}', '\u{0}']),
- ('\u{448}', ['\u{428}', '\u{0}', '\u{0}']), ('\u{449}', ['\u{429}', '\u{0}', '\u{0}']),
- ('\u{44a}', ['\u{42a}', '\u{0}', '\u{0}']), ('\u{44b}', ['\u{42b}', '\u{0}', '\u{0}']),
- ('\u{44c}', ['\u{42c}', '\u{0}', '\u{0}']), ('\u{44d}', ['\u{42d}', '\u{0}', '\u{0}']),
- ('\u{44e}', ['\u{42e}', '\u{0}', '\u{0}']), ('\u{44f}', ['\u{42f}', '\u{0}', '\u{0}']),
- ('\u{450}', ['\u{400}', '\u{0}', '\u{0}']), ('\u{451}', ['\u{401}', '\u{0}', '\u{0}']),
- ('\u{452}', ['\u{402}', '\u{0}', '\u{0}']), ('\u{453}', ['\u{403}', '\u{0}', '\u{0}']),
- ('\u{454}', ['\u{404}', '\u{0}', '\u{0}']), ('\u{455}', ['\u{405}', '\u{0}', '\u{0}']),
- ('\u{456}', ['\u{406}', '\u{0}', '\u{0}']), ('\u{457}', ['\u{407}', '\u{0}', '\u{0}']),
- ('\u{458}', ['\u{408}', '\u{0}', '\u{0}']), ('\u{459}', ['\u{409}', '\u{0}', '\u{0}']),
- ('\u{45a}', ['\u{40a}', '\u{0}', '\u{0}']), ('\u{45b}', ['\u{40b}', '\u{0}', '\u{0}']),
- ('\u{45c}', ['\u{40c}', '\u{0}', '\u{0}']), ('\u{45d}', ['\u{40d}', '\u{0}', '\u{0}']),
- ('\u{45e}', ['\u{40e}', '\u{0}', '\u{0}']), ('\u{45f}', ['\u{40f}', '\u{0}', '\u{0}']),
- ('\u{461}', ['\u{460}', '\u{0}', '\u{0}']), ('\u{463}', ['\u{462}', '\u{0}', '\u{0}']),
- ('\u{465}', ['\u{464}', '\u{0}', '\u{0}']), ('\u{467}', ['\u{466}', '\u{0}', '\u{0}']),
- ('\u{469}', ['\u{468}', '\u{0}', '\u{0}']), ('\u{46b}', ['\u{46a}', '\u{0}', '\u{0}']),
- ('\u{46d}', ['\u{46c}', '\u{0}', '\u{0}']), ('\u{46f}', ['\u{46e}', '\u{0}', '\u{0}']),
- ('\u{471}', ['\u{470}', '\u{0}', '\u{0}']), ('\u{473}', ['\u{472}', '\u{0}', '\u{0}']),
- ('\u{475}', ['\u{474}', '\u{0}', '\u{0}']), ('\u{477}', ['\u{476}', '\u{0}', '\u{0}']),
- ('\u{479}', ['\u{478}', '\u{0}', '\u{0}']), ('\u{47b}', ['\u{47a}', '\u{0}', '\u{0}']),
- ('\u{47d}', ['\u{47c}', '\u{0}', '\u{0}']), ('\u{47f}', ['\u{47e}', '\u{0}', '\u{0}']),
- ('\u{481}', ['\u{480}', '\u{0}', '\u{0}']), ('\u{48b}', ['\u{48a}', '\u{0}', '\u{0}']),
- ('\u{48d}', ['\u{48c}', '\u{0}', '\u{0}']), ('\u{48f}', ['\u{48e}', '\u{0}', '\u{0}']),
- ('\u{491}', ['\u{490}', '\u{0}', '\u{0}']), ('\u{493}', ['\u{492}', '\u{0}', '\u{0}']),
- ('\u{495}', ['\u{494}', '\u{0}', '\u{0}']), ('\u{497}', ['\u{496}', '\u{0}', '\u{0}']),
- ('\u{499}', ['\u{498}', '\u{0}', '\u{0}']), ('\u{49b}', ['\u{49a}', '\u{0}', '\u{0}']),
- ('\u{49d}', ['\u{49c}', '\u{0}', '\u{0}']), ('\u{49f}', ['\u{49e}', '\u{0}', '\u{0}']),
- ('\u{4a1}', ['\u{4a0}', '\u{0}', '\u{0}']), ('\u{4a3}', ['\u{4a2}', '\u{0}', '\u{0}']),
- ('\u{4a5}', ['\u{4a4}', '\u{0}', '\u{0}']), ('\u{4a7}', ['\u{4a6}', '\u{0}', '\u{0}']),
- ('\u{4a9}', ['\u{4a8}', '\u{0}', '\u{0}']), ('\u{4ab}', ['\u{4aa}', '\u{0}', '\u{0}']),
- ('\u{4ad}', ['\u{4ac}', '\u{0}', '\u{0}']), ('\u{4af}', ['\u{4ae}', '\u{0}', '\u{0}']),
- ('\u{4b1}', ['\u{4b0}', '\u{0}', '\u{0}']), ('\u{4b3}', ['\u{4b2}', '\u{0}', '\u{0}']),
- ('\u{4b5}', ['\u{4b4}', '\u{0}', '\u{0}']), ('\u{4b7}', ['\u{4b6}', '\u{0}', '\u{0}']),
- ('\u{4b9}', ['\u{4b8}', '\u{0}', '\u{0}']), ('\u{4bb}', ['\u{4ba}', '\u{0}', '\u{0}']),
- ('\u{4bd}', ['\u{4bc}', '\u{0}', '\u{0}']), ('\u{4bf}', ['\u{4be}', '\u{0}', '\u{0}']),
- ('\u{4c2}', ['\u{4c1}', '\u{0}', '\u{0}']), ('\u{4c4}', ['\u{4c3}', '\u{0}', '\u{0}']),
- ('\u{4c6}', ['\u{4c5}', '\u{0}', '\u{0}']), ('\u{4c8}', ['\u{4c7}', '\u{0}', '\u{0}']),
- ('\u{4ca}', ['\u{4c9}', '\u{0}', '\u{0}']), ('\u{4cc}', ['\u{4cb}', '\u{0}', '\u{0}']),
- ('\u{4ce}', ['\u{4cd}', '\u{0}', '\u{0}']), ('\u{4cf}', ['\u{4c0}', '\u{0}', '\u{0}']),
- ('\u{4d1}', ['\u{4d0}', '\u{0}', '\u{0}']), ('\u{4d3}', ['\u{4d2}', '\u{0}', '\u{0}']),
- ('\u{4d5}', ['\u{4d4}', '\u{0}', '\u{0}']), ('\u{4d7}', ['\u{4d6}', '\u{0}', '\u{0}']),
- ('\u{4d9}', ['\u{4d8}', '\u{0}', '\u{0}']), ('\u{4db}', ['\u{4da}', '\u{0}', '\u{0}']),
- ('\u{4dd}', ['\u{4dc}', '\u{0}', '\u{0}']), ('\u{4df}', ['\u{4de}', '\u{0}', '\u{0}']),
- ('\u{4e1}', ['\u{4e0}', '\u{0}', '\u{0}']), ('\u{4e3}', ['\u{4e2}', '\u{0}', '\u{0}']),
- ('\u{4e5}', ['\u{4e4}', '\u{0}', '\u{0}']), ('\u{4e7}', ['\u{4e6}', '\u{0}', '\u{0}']),
- ('\u{4e9}', ['\u{4e8}', '\u{0}', '\u{0}']), ('\u{4eb}', ['\u{4ea}', '\u{0}', '\u{0}']),
- ('\u{4ed}', ['\u{4ec}', '\u{0}', '\u{0}']), ('\u{4ef}', ['\u{4ee}', '\u{0}', '\u{0}']),
- ('\u{4f1}', ['\u{4f0}', '\u{0}', '\u{0}']), ('\u{4f3}', ['\u{4f2}', '\u{0}', '\u{0}']),
- ('\u{4f5}', ['\u{4f4}', '\u{0}', '\u{0}']), ('\u{4f7}', ['\u{4f6}', '\u{0}', '\u{0}']),
- ('\u{4f9}', ['\u{4f8}', '\u{0}', '\u{0}']), ('\u{4fb}', ['\u{4fa}', '\u{0}', '\u{0}']),
- ('\u{4fd}', ['\u{4fc}', '\u{0}', '\u{0}']), ('\u{4ff}', ['\u{4fe}', '\u{0}', '\u{0}']),
- ('\u{501}', ['\u{500}', '\u{0}', '\u{0}']), ('\u{503}', ['\u{502}', '\u{0}', '\u{0}']),
- ('\u{505}', ['\u{504}', '\u{0}', '\u{0}']), ('\u{507}', ['\u{506}', '\u{0}', '\u{0}']),
- ('\u{509}', ['\u{508}', '\u{0}', '\u{0}']), ('\u{50b}', ['\u{50a}', '\u{0}', '\u{0}']),
- ('\u{50d}', ['\u{50c}', '\u{0}', '\u{0}']), ('\u{50f}', ['\u{50e}', '\u{0}', '\u{0}']),
- ('\u{511}', ['\u{510}', '\u{0}', '\u{0}']), ('\u{513}', ['\u{512}', '\u{0}', '\u{0}']),
- ('\u{515}', ['\u{514}', '\u{0}', '\u{0}']), ('\u{517}', ['\u{516}', '\u{0}', '\u{0}']),
- ('\u{519}', ['\u{518}', '\u{0}', '\u{0}']), ('\u{51b}', ['\u{51a}', '\u{0}', '\u{0}']),
- ('\u{51d}', ['\u{51c}', '\u{0}', '\u{0}']), ('\u{51f}', ['\u{51e}', '\u{0}', '\u{0}']),
- ('\u{521}', ['\u{520}', '\u{0}', '\u{0}']), ('\u{523}', ['\u{522}', '\u{0}', '\u{0}']),
- ('\u{525}', ['\u{524}', '\u{0}', '\u{0}']), ('\u{527}', ['\u{526}', '\u{0}', '\u{0}']),
- ('\u{529}', ['\u{528}', '\u{0}', '\u{0}']), ('\u{52b}', ['\u{52a}', '\u{0}', '\u{0}']),
- ('\u{52d}', ['\u{52c}', '\u{0}', '\u{0}']), ('\u{52f}', ['\u{52e}', '\u{0}', '\u{0}']),
- ('\u{561}', ['\u{531}', '\u{0}', '\u{0}']), ('\u{562}', ['\u{532}', '\u{0}', '\u{0}']),
- ('\u{563}', ['\u{533}', '\u{0}', '\u{0}']), ('\u{564}', ['\u{534}', '\u{0}', '\u{0}']),
- ('\u{565}', ['\u{535}', '\u{0}', '\u{0}']), ('\u{566}', ['\u{536}', '\u{0}', '\u{0}']),
- ('\u{567}', ['\u{537}', '\u{0}', '\u{0}']), ('\u{568}', ['\u{538}', '\u{0}', '\u{0}']),
- ('\u{569}', ['\u{539}', '\u{0}', '\u{0}']), ('\u{56a}', ['\u{53a}', '\u{0}', '\u{0}']),
- ('\u{56b}', ['\u{53b}', '\u{0}', '\u{0}']), ('\u{56c}', ['\u{53c}', '\u{0}', '\u{0}']),
- ('\u{56d}', ['\u{53d}', '\u{0}', '\u{0}']), ('\u{56e}', ['\u{53e}', '\u{0}', '\u{0}']),
- ('\u{56f}', ['\u{53f}', '\u{0}', '\u{0}']), ('\u{570}', ['\u{540}', '\u{0}', '\u{0}']),
- ('\u{571}', ['\u{541}', '\u{0}', '\u{0}']), ('\u{572}', ['\u{542}', '\u{0}', '\u{0}']),
- ('\u{573}', ['\u{543}', '\u{0}', '\u{0}']), ('\u{574}', ['\u{544}', '\u{0}', '\u{0}']),
- ('\u{575}', ['\u{545}', '\u{0}', '\u{0}']), ('\u{576}', ['\u{546}', '\u{0}', '\u{0}']),
- ('\u{577}', ['\u{547}', '\u{0}', '\u{0}']), ('\u{578}', ['\u{548}', '\u{0}', '\u{0}']),
- ('\u{579}', ['\u{549}', '\u{0}', '\u{0}']), ('\u{57a}', ['\u{54a}', '\u{0}', '\u{0}']),
- ('\u{57b}', ['\u{54b}', '\u{0}', '\u{0}']), ('\u{57c}', ['\u{54c}', '\u{0}', '\u{0}']),
- ('\u{57d}', ['\u{54d}', '\u{0}', '\u{0}']), ('\u{57e}', ['\u{54e}', '\u{0}', '\u{0}']),
- ('\u{57f}', ['\u{54f}', '\u{0}', '\u{0}']), ('\u{580}', ['\u{550}', '\u{0}', '\u{0}']),
- ('\u{581}', ['\u{551}', '\u{0}', '\u{0}']), ('\u{582}', ['\u{552}', '\u{0}', '\u{0}']),
- ('\u{583}', ['\u{553}', '\u{0}', '\u{0}']), ('\u{584}', ['\u{554}', '\u{0}', '\u{0}']),
- ('\u{585}', ['\u{555}', '\u{0}', '\u{0}']), ('\u{586}', ['\u{556}', '\u{0}', '\u{0}']),
- ('\u{587}', ['\u{535}', '\u{552}', '\u{0}']), ('\u{10d0}', ['\u{1c90}', '\u{0}', '\u{0}']),
- ('\u{10d1}', ['\u{1c91}', '\u{0}', '\u{0}']), ('\u{10d2}', ['\u{1c92}', '\u{0}', '\u{0}']),
- ('\u{10d3}', ['\u{1c93}', '\u{0}', '\u{0}']), ('\u{10d4}', ['\u{1c94}', '\u{0}', '\u{0}']),
- ('\u{10d5}', ['\u{1c95}', '\u{0}', '\u{0}']), ('\u{10d6}', ['\u{1c96}', '\u{0}', '\u{0}']),
- ('\u{10d7}', ['\u{1c97}', '\u{0}', '\u{0}']), ('\u{10d8}', ['\u{1c98}', '\u{0}', '\u{0}']),
- ('\u{10d9}', ['\u{1c99}', '\u{0}', '\u{0}']), ('\u{10da}', ['\u{1c9a}', '\u{0}', '\u{0}']),
- ('\u{10db}', ['\u{1c9b}', '\u{0}', '\u{0}']), ('\u{10dc}', ['\u{1c9c}', '\u{0}', '\u{0}']),
- ('\u{10dd}', ['\u{1c9d}', '\u{0}', '\u{0}']), ('\u{10de}', ['\u{1c9e}', '\u{0}', '\u{0}']),
- ('\u{10df}', ['\u{1c9f}', '\u{0}', '\u{0}']), ('\u{10e0}', ['\u{1ca0}', '\u{0}', '\u{0}']),
- ('\u{10e1}', ['\u{1ca1}', '\u{0}', '\u{0}']), ('\u{10e2}', ['\u{1ca2}', '\u{0}', '\u{0}']),
- ('\u{10e3}', ['\u{1ca3}', '\u{0}', '\u{0}']), ('\u{10e4}', ['\u{1ca4}', '\u{0}', '\u{0}']),
- ('\u{10e5}', ['\u{1ca5}', '\u{0}', '\u{0}']), ('\u{10e6}', ['\u{1ca6}', '\u{0}', '\u{0}']),
- ('\u{10e7}', ['\u{1ca7}', '\u{0}', '\u{0}']), ('\u{10e8}', ['\u{1ca8}', '\u{0}', '\u{0}']),
- ('\u{10e9}', ['\u{1ca9}', '\u{0}', '\u{0}']), ('\u{10ea}', ['\u{1caa}', '\u{0}', '\u{0}']),
- ('\u{10eb}', ['\u{1cab}', '\u{0}', '\u{0}']), ('\u{10ec}', ['\u{1cac}', '\u{0}', '\u{0}']),
- ('\u{10ed}', ['\u{1cad}', '\u{0}', '\u{0}']), ('\u{10ee}', ['\u{1cae}', '\u{0}', '\u{0}']),
- ('\u{10ef}', ['\u{1caf}', '\u{0}', '\u{0}']), ('\u{10f0}', ['\u{1cb0}', '\u{0}', '\u{0}']),
- ('\u{10f1}', ['\u{1cb1}', '\u{0}', '\u{0}']), ('\u{10f2}', ['\u{1cb2}', '\u{0}', '\u{0}']),
- ('\u{10f3}', ['\u{1cb3}', '\u{0}', '\u{0}']), ('\u{10f4}', ['\u{1cb4}', '\u{0}', '\u{0}']),
- ('\u{10f5}', ['\u{1cb5}', '\u{0}', '\u{0}']), ('\u{10f6}', ['\u{1cb6}', '\u{0}', '\u{0}']),
- ('\u{10f7}', ['\u{1cb7}', '\u{0}', '\u{0}']), ('\u{10f8}', ['\u{1cb8}', '\u{0}', '\u{0}']),
- ('\u{10f9}', ['\u{1cb9}', '\u{0}', '\u{0}']), ('\u{10fa}', ['\u{1cba}', '\u{0}', '\u{0}']),
- ('\u{10fd}', ['\u{1cbd}', '\u{0}', '\u{0}']), ('\u{10fe}', ['\u{1cbe}', '\u{0}', '\u{0}']),
- ('\u{10ff}', ['\u{1cbf}', '\u{0}', '\u{0}']), ('\u{13f8}', ['\u{13f0}', '\u{0}', '\u{0}']),
- ('\u{13f9}', ['\u{13f1}', '\u{0}', '\u{0}']), ('\u{13fa}', ['\u{13f2}', '\u{0}', '\u{0}']),
- ('\u{13fb}', ['\u{13f3}', '\u{0}', '\u{0}']), ('\u{13fc}', ['\u{13f4}', '\u{0}', '\u{0}']),
- ('\u{13fd}', ['\u{13f5}', '\u{0}', '\u{0}']), ('\u{1c80}', ['\u{412}', '\u{0}', '\u{0}']),
- ('\u{1c81}', ['\u{414}', '\u{0}', '\u{0}']), ('\u{1c82}', ['\u{41e}', '\u{0}', '\u{0}']),
- ('\u{1c83}', ['\u{421}', '\u{0}', '\u{0}']), ('\u{1c84}', ['\u{422}', '\u{0}', '\u{0}']),
- ('\u{1c85}', ['\u{422}', '\u{0}', '\u{0}']), ('\u{1c86}', ['\u{42a}', '\u{0}', '\u{0}']),
- ('\u{1c87}', ['\u{462}', '\u{0}', '\u{0}']), ('\u{1c88}', ['\u{a64a}', '\u{0}', '\u{0}']),
- ('\u{1d79}', ['\u{a77d}', '\u{0}', '\u{0}']), ('\u{1d7d}', ['\u{2c63}', '\u{0}', '\u{0}']),
- ('\u{1d8e}', ['\u{a7c6}', '\u{0}', '\u{0}']), ('\u{1e01}', ['\u{1e00}', '\u{0}', '\u{0}']),
- ('\u{1e03}', ['\u{1e02}', '\u{0}', '\u{0}']), ('\u{1e05}', ['\u{1e04}', '\u{0}', '\u{0}']),
- ('\u{1e07}', ['\u{1e06}', '\u{0}', '\u{0}']), ('\u{1e09}', ['\u{1e08}', '\u{0}', '\u{0}']),
- ('\u{1e0b}', ['\u{1e0a}', '\u{0}', '\u{0}']), ('\u{1e0d}', ['\u{1e0c}', '\u{0}', '\u{0}']),
- ('\u{1e0f}', ['\u{1e0e}', '\u{0}', '\u{0}']), ('\u{1e11}', ['\u{1e10}', '\u{0}', '\u{0}']),
- ('\u{1e13}', ['\u{1e12}', '\u{0}', '\u{0}']), ('\u{1e15}', ['\u{1e14}', '\u{0}', '\u{0}']),
- ('\u{1e17}', ['\u{1e16}', '\u{0}', '\u{0}']), ('\u{1e19}', ['\u{1e18}', '\u{0}', '\u{0}']),
- ('\u{1e1b}', ['\u{1e1a}', '\u{0}', '\u{0}']), ('\u{1e1d}', ['\u{1e1c}', '\u{0}', '\u{0}']),
- ('\u{1e1f}', ['\u{1e1e}', '\u{0}', '\u{0}']), ('\u{1e21}', ['\u{1e20}', '\u{0}', '\u{0}']),
- ('\u{1e23}', ['\u{1e22}', '\u{0}', '\u{0}']), ('\u{1e25}', ['\u{1e24}', '\u{0}', '\u{0}']),
- ('\u{1e27}', ['\u{1e26}', '\u{0}', '\u{0}']), ('\u{1e29}', ['\u{1e28}', '\u{0}', '\u{0}']),
- ('\u{1e2b}', ['\u{1e2a}', '\u{0}', '\u{0}']), ('\u{1e2d}', ['\u{1e2c}', '\u{0}', '\u{0}']),
- ('\u{1e2f}', ['\u{1e2e}', '\u{0}', '\u{0}']), ('\u{1e31}', ['\u{1e30}', '\u{0}', '\u{0}']),
- ('\u{1e33}', ['\u{1e32}', '\u{0}', '\u{0}']), ('\u{1e35}', ['\u{1e34}', '\u{0}', '\u{0}']),
- ('\u{1e37}', ['\u{1e36}', '\u{0}', '\u{0}']), ('\u{1e39}', ['\u{1e38}', '\u{0}', '\u{0}']),
- ('\u{1e3b}', ['\u{1e3a}', '\u{0}', '\u{0}']), ('\u{1e3d}', ['\u{1e3c}', '\u{0}', '\u{0}']),
- ('\u{1e3f}', ['\u{1e3e}', '\u{0}', '\u{0}']), ('\u{1e41}', ['\u{1e40}', '\u{0}', '\u{0}']),
- ('\u{1e43}', ['\u{1e42}', '\u{0}', '\u{0}']), ('\u{1e45}', ['\u{1e44}', '\u{0}', '\u{0}']),
- ('\u{1e47}', ['\u{1e46}', '\u{0}', '\u{0}']), ('\u{1e49}', ['\u{1e48}', '\u{0}', '\u{0}']),
- ('\u{1e4b}', ['\u{1e4a}', '\u{0}', '\u{0}']), ('\u{1e4d}', ['\u{1e4c}', '\u{0}', '\u{0}']),
- ('\u{1e4f}', ['\u{1e4e}', '\u{0}', '\u{0}']), ('\u{1e51}', ['\u{1e50}', '\u{0}', '\u{0}']),
- ('\u{1e53}', ['\u{1e52}', '\u{0}', '\u{0}']), ('\u{1e55}', ['\u{1e54}', '\u{0}', '\u{0}']),
- ('\u{1e57}', ['\u{1e56}', '\u{0}', '\u{0}']), ('\u{1e59}', ['\u{1e58}', '\u{0}', '\u{0}']),
- ('\u{1e5b}', ['\u{1e5a}', '\u{0}', '\u{0}']), ('\u{1e5d}', ['\u{1e5c}', '\u{0}', '\u{0}']),
- ('\u{1e5f}', ['\u{1e5e}', '\u{0}', '\u{0}']), ('\u{1e61}', ['\u{1e60}', '\u{0}', '\u{0}']),
- ('\u{1e63}', ['\u{1e62}', '\u{0}', '\u{0}']), ('\u{1e65}', ['\u{1e64}', '\u{0}', '\u{0}']),
- ('\u{1e67}', ['\u{1e66}', '\u{0}', '\u{0}']), ('\u{1e69}', ['\u{1e68}', '\u{0}', '\u{0}']),
- ('\u{1e6b}', ['\u{1e6a}', '\u{0}', '\u{0}']), ('\u{1e6d}', ['\u{1e6c}', '\u{0}', '\u{0}']),
- ('\u{1e6f}', ['\u{1e6e}', '\u{0}', '\u{0}']), ('\u{1e71}', ['\u{1e70}', '\u{0}', '\u{0}']),
- ('\u{1e73}', ['\u{1e72}', '\u{0}', '\u{0}']), ('\u{1e75}', ['\u{1e74}', '\u{0}', '\u{0}']),
- ('\u{1e77}', ['\u{1e76}', '\u{0}', '\u{0}']), ('\u{1e79}', ['\u{1e78}', '\u{0}', '\u{0}']),
- ('\u{1e7b}', ['\u{1e7a}', '\u{0}', '\u{0}']), ('\u{1e7d}', ['\u{1e7c}', '\u{0}', '\u{0}']),
- ('\u{1e7f}', ['\u{1e7e}', '\u{0}', '\u{0}']), ('\u{1e81}', ['\u{1e80}', '\u{0}', '\u{0}']),
- ('\u{1e83}', ['\u{1e82}', '\u{0}', '\u{0}']), ('\u{1e85}', ['\u{1e84}', '\u{0}', '\u{0}']),
- ('\u{1e87}', ['\u{1e86}', '\u{0}', '\u{0}']), ('\u{1e89}', ['\u{1e88}', '\u{0}', '\u{0}']),
- ('\u{1e8b}', ['\u{1e8a}', '\u{0}', '\u{0}']), ('\u{1e8d}', ['\u{1e8c}', '\u{0}', '\u{0}']),
- ('\u{1e8f}', ['\u{1e8e}', '\u{0}', '\u{0}']), ('\u{1e91}', ['\u{1e90}', '\u{0}', '\u{0}']),
- ('\u{1e93}', ['\u{1e92}', '\u{0}', '\u{0}']), ('\u{1e95}', ['\u{1e94}', '\u{0}', '\u{0}']),
- ('\u{1e96}', ['H', '\u{331}', '\u{0}']), ('\u{1e97}', ['T', '\u{308}', '\u{0}']),
- ('\u{1e98}', ['W', '\u{30a}', '\u{0}']), ('\u{1e99}', ['Y', '\u{30a}', '\u{0}']),
- ('\u{1e9a}', ['A', '\u{2be}', '\u{0}']), ('\u{1e9b}', ['\u{1e60}', '\u{0}', '\u{0}']),
- ('\u{1ea1}', ['\u{1ea0}', '\u{0}', '\u{0}']), ('\u{1ea3}', ['\u{1ea2}', '\u{0}', '\u{0}']),
- ('\u{1ea5}', ['\u{1ea4}', '\u{0}', '\u{0}']), ('\u{1ea7}', ['\u{1ea6}', '\u{0}', '\u{0}']),
- ('\u{1ea9}', ['\u{1ea8}', '\u{0}', '\u{0}']), ('\u{1eab}', ['\u{1eaa}', '\u{0}', '\u{0}']),
- ('\u{1ead}', ['\u{1eac}', '\u{0}', '\u{0}']), ('\u{1eaf}', ['\u{1eae}', '\u{0}', '\u{0}']),
- ('\u{1eb1}', ['\u{1eb0}', '\u{0}', '\u{0}']), ('\u{1eb3}', ['\u{1eb2}', '\u{0}', '\u{0}']),
- ('\u{1eb5}', ['\u{1eb4}', '\u{0}', '\u{0}']), ('\u{1eb7}', ['\u{1eb6}', '\u{0}', '\u{0}']),
- ('\u{1eb9}', ['\u{1eb8}', '\u{0}', '\u{0}']), ('\u{1ebb}', ['\u{1eba}', '\u{0}', '\u{0}']),
- ('\u{1ebd}', ['\u{1ebc}', '\u{0}', '\u{0}']), ('\u{1ebf}', ['\u{1ebe}', '\u{0}', '\u{0}']),
- ('\u{1ec1}', ['\u{1ec0}', '\u{0}', '\u{0}']), ('\u{1ec3}', ['\u{1ec2}', '\u{0}', '\u{0}']),
- ('\u{1ec5}', ['\u{1ec4}', '\u{0}', '\u{0}']), ('\u{1ec7}', ['\u{1ec6}', '\u{0}', '\u{0}']),
- ('\u{1ec9}', ['\u{1ec8}', '\u{0}', '\u{0}']), ('\u{1ecb}', ['\u{1eca}', '\u{0}', '\u{0}']),
- ('\u{1ecd}', ['\u{1ecc}', '\u{0}', '\u{0}']), ('\u{1ecf}', ['\u{1ece}', '\u{0}', '\u{0}']),
- ('\u{1ed1}', ['\u{1ed0}', '\u{0}', '\u{0}']), ('\u{1ed3}', ['\u{1ed2}', '\u{0}', '\u{0}']),
- ('\u{1ed5}', ['\u{1ed4}', '\u{0}', '\u{0}']), ('\u{1ed7}', ['\u{1ed6}', '\u{0}', '\u{0}']),
- ('\u{1ed9}', ['\u{1ed8}', '\u{0}', '\u{0}']), ('\u{1edb}', ['\u{1eda}', '\u{0}', '\u{0}']),
- ('\u{1edd}', ['\u{1edc}', '\u{0}', '\u{0}']), ('\u{1edf}', ['\u{1ede}', '\u{0}', '\u{0}']),
- ('\u{1ee1}', ['\u{1ee0}', '\u{0}', '\u{0}']), ('\u{1ee3}', ['\u{1ee2}', '\u{0}', '\u{0}']),
- ('\u{1ee5}', ['\u{1ee4}', '\u{0}', '\u{0}']), ('\u{1ee7}', ['\u{1ee6}', '\u{0}', '\u{0}']),
- ('\u{1ee9}', ['\u{1ee8}', '\u{0}', '\u{0}']), ('\u{1eeb}', ['\u{1eea}', '\u{0}', '\u{0}']),
- ('\u{1eed}', ['\u{1eec}', '\u{0}', '\u{0}']), ('\u{1eef}', ['\u{1eee}', '\u{0}', '\u{0}']),
- ('\u{1ef1}', ['\u{1ef0}', '\u{0}', '\u{0}']), ('\u{1ef3}', ['\u{1ef2}', '\u{0}', '\u{0}']),
- ('\u{1ef5}', ['\u{1ef4}', '\u{0}', '\u{0}']), ('\u{1ef7}', ['\u{1ef6}', '\u{0}', '\u{0}']),
- ('\u{1ef9}', ['\u{1ef8}', '\u{0}', '\u{0}']), ('\u{1efb}', ['\u{1efa}', '\u{0}', '\u{0}']),
- ('\u{1efd}', ['\u{1efc}', '\u{0}', '\u{0}']), ('\u{1eff}', ['\u{1efe}', '\u{0}', '\u{0}']),
- ('\u{1f00}', ['\u{1f08}', '\u{0}', '\u{0}']), ('\u{1f01}', ['\u{1f09}', '\u{0}', '\u{0}']),
- ('\u{1f02}', ['\u{1f0a}', '\u{0}', '\u{0}']), ('\u{1f03}', ['\u{1f0b}', '\u{0}', '\u{0}']),
- ('\u{1f04}', ['\u{1f0c}', '\u{0}', '\u{0}']), ('\u{1f05}', ['\u{1f0d}', '\u{0}', '\u{0}']),
- ('\u{1f06}', ['\u{1f0e}', '\u{0}', '\u{0}']), ('\u{1f07}', ['\u{1f0f}', '\u{0}', '\u{0}']),
- ('\u{1f10}', ['\u{1f18}', '\u{0}', '\u{0}']), ('\u{1f11}', ['\u{1f19}', '\u{0}', '\u{0}']),
- ('\u{1f12}', ['\u{1f1a}', '\u{0}', '\u{0}']), ('\u{1f13}', ['\u{1f1b}', '\u{0}', '\u{0}']),
- ('\u{1f14}', ['\u{1f1c}', '\u{0}', '\u{0}']), ('\u{1f15}', ['\u{1f1d}', '\u{0}', '\u{0}']),
- ('\u{1f20}', ['\u{1f28}', '\u{0}', '\u{0}']), ('\u{1f21}', ['\u{1f29}', '\u{0}', '\u{0}']),
- ('\u{1f22}', ['\u{1f2a}', '\u{0}', '\u{0}']), ('\u{1f23}', ['\u{1f2b}', '\u{0}', '\u{0}']),
- ('\u{1f24}', ['\u{1f2c}', '\u{0}', '\u{0}']), ('\u{1f25}', ['\u{1f2d}', '\u{0}', '\u{0}']),
- ('\u{1f26}', ['\u{1f2e}', '\u{0}', '\u{0}']), ('\u{1f27}', ['\u{1f2f}', '\u{0}', '\u{0}']),
- ('\u{1f30}', ['\u{1f38}', '\u{0}', '\u{0}']), ('\u{1f31}', ['\u{1f39}', '\u{0}', '\u{0}']),
- ('\u{1f32}', ['\u{1f3a}', '\u{0}', '\u{0}']), ('\u{1f33}', ['\u{1f3b}', '\u{0}', '\u{0}']),
- ('\u{1f34}', ['\u{1f3c}', '\u{0}', '\u{0}']), ('\u{1f35}', ['\u{1f3d}', '\u{0}', '\u{0}']),
- ('\u{1f36}', ['\u{1f3e}', '\u{0}', '\u{0}']), ('\u{1f37}', ['\u{1f3f}', '\u{0}', '\u{0}']),
- ('\u{1f40}', ['\u{1f48}', '\u{0}', '\u{0}']), ('\u{1f41}', ['\u{1f49}', '\u{0}', '\u{0}']),
- ('\u{1f42}', ['\u{1f4a}', '\u{0}', '\u{0}']), ('\u{1f43}', ['\u{1f4b}', '\u{0}', '\u{0}']),
- ('\u{1f44}', ['\u{1f4c}', '\u{0}', '\u{0}']), ('\u{1f45}', ['\u{1f4d}', '\u{0}', '\u{0}']),
- ('\u{1f50}', ['\u{3a5}', '\u{313}', '\u{0}']), ('\u{1f51}', ['\u{1f59}', '\u{0}', '\u{0}']),
- ('\u{1f52}', ['\u{3a5}', '\u{313}', '\u{300}']),
- ('\u{1f53}', ['\u{1f5b}', '\u{0}', '\u{0}']),
- ('\u{1f54}', ['\u{3a5}', '\u{313}', '\u{301}']),
- ('\u{1f55}', ['\u{1f5d}', '\u{0}', '\u{0}']),
- ('\u{1f56}', ['\u{3a5}', '\u{313}', '\u{342}']),
- ('\u{1f57}', ['\u{1f5f}', '\u{0}', '\u{0}']), ('\u{1f60}', ['\u{1f68}', '\u{0}', '\u{0}']),
- ('\u{1f61}', ['\u{1f69}', '\u{0}', '\u{0}']), ('\u{1f62}', ['\u{1f6a}', '\u{0}', '\u{0}']),
- ('\u{1f63}', ['\u{1f6b}', '\u{0}', '\u{0}']), ('\u{1f64}', ['\u{1f6c}', '\u{0}', '\u{0}']),
- ('\u{1f65}', ['\u{1f6d}', '\u{0}', '\u{0}']), ('\u{1f66}', ['\u{1f6e}', '\u{0}', '\u{0}']),
- ('\u{1f67}', ['\u{1f6f}', '\u{0}', '\u{0}']), ('\u{1f70}', ['\u{1fba}', '\u{0}', '\u{0}']),
- ('\u{1f71}', ['\u{1fbb}', '\u{0}', '\u{0}']), ('\u{1f72}', ['\u{1fc8}', '\u{0}', '\u{0}']),
- ('\u{1f73}', ['\u{1fc9}', '\u{0}', '\u{0}']), ('\u{1f74}', ['\u{1fca}', '\u{0}', '\u{0}']),
- ('\u{1f75}', ['\u{1fcb}', '\u{0}', '\u{0}']), ('\u{1f76}', ['\u{1fda}', '\u{0}', '\u{0}']),
- ('\u{1f77}', ['\u{1fdb}', '\u{0}', '\u{0}']), ('\u{1f78}', ['\u{1ff8}', '\u{0}', '\u{0}']),
- ('\u{1f79}', ['\u{1ff9}', '\u{0}', '\u{0}']), ('\u{1f7a}', ['\u{1fea}', '\u{0}', '\u{0}']),
- ('\u{1f7b}', ['\u{1feb}', '\u{0}', '\u{0}']), ('\u{1f7c}', ['\u{1ffa}', '\u{0}', '\u{0}']),
- ('\u{1f7d}', ['\u{1ffb}', '\u{0}', '\u{0}']),
- ('\u{1f80}', ['\u{1f08}', '\u{399}', '\u{0}']),
- ('\u{1f81}', ['\u{1f09}', '\u{399}', '\u{0}']),
- ('\u{1f82}', ['\u{1f0a}', '\u{399}', '\u{0}']),
- ('\u{1f83}', ['\u{1f0b}', '\u{399}', '\u{0}']),
- ('\u{1f84}', ['\u{1f0c}', '\u{399}', '\u{0}']),
- ('\u{1f85}', ['\u{1f0d}', '\u{399}', '\u{0}']),
- ('\u{1f86}', ['\u{1f0e}', '\u{399}', '\u{0}']),
- ('\u{1f87}', ['\u{1f0f}', '\u{399}', '\u{0}']),
- ('\u{1f88}', ['\u{1f08}', '\u{399}', '\u{0}']),
- ('\u{1f89}', ['\u{1f09}', '\u{399}', '\u{0}']),
- ('\u{1f8a}', ['\u{1f0a}', '\u{399}', '\u{0}']),
- ('\u{1f8b}', ['\u{1f0b}', '\u{399}', '\u{0}']),
- ('\u{1f8c}', ['\u{1f0c}', '\u{399}', '\u{0}']),
- ('\u{1f8d}', ['\u{1f0d}', '\u{399}', '\u{0}']),
- ('\u{1f8e}', ['\u{1f0e}', '\u{399}', '\u{0}']),
- ('\u{1f8f}', ['\u{1f0f}', '\u{399}', '\u{0}']),
- ('\u{1f90}', ['\u{1f28}', '\u{399}', '\u{0}']),
- ('\u{1f91}', ['\u{1f29}', '\u{399}', '\u{0}']),
- ('\u{1f92}', ['\u{1f2a}', '\u{399}', '\u{0}']),
- ('\u{1f93}', ['\u{1f2b}', '\u{399}', '\u{0}']),
- ('\u{1f94}', ['\u{1f2c}', '\u{399}', '\u{0}']),
- ('\u{1f95}', ['\u{1f2d}', '\u{399}', '\u{0}']),
- ('\u{1f96}', ['\u{1f2e}', '\u{399}', '\u{0}']),
- ('\u{1f97}', ['\u{1f2f}', '\u{399}', '\u{0}']),
- ('\u{1f98}', ['\u{1f28}', '\u{399}', '\u{0}']),
- ('\u{1f99}', ['\u{1f29}', '\u{399}', '\u{0}']),
- ('\u{1f9a}', ['\u{1f2a}', '\u{399}', '\u{0}']),
- ('\u{1f9b}', ['\u{1f2b}', '\u{399}', '\u{0}']),
- ('\u{1f9c}', ['\u{1f2c}', '\u{399}', '\u{0}']),
- ('\u{1f9d}', ['\u{1f2d}', '\u{399}', '\u{0}']),
- ('\u{1f9e}', ['\u{1f2e}', '\u{399}', '\u{0}']),
- ('\u{1f9f}', ['\u{1f2f}', '\u{399}', '\u{0}']),
- ('\u{1fa0}', ['\u{1f68}', '\u{399}', '\u{0}']),
- ('\u{1fa1}', ['\u{1f69}', '\u{399}', '\u{0}']),
- ('\u{1fa2}', ['\u{1f6a}', '\u{399}', '\u{0}']),
- ('\u{1fa3}', ['\u{1f6b}', '\u{399}', '\u{0}']),
- ('\u{1fa4}', ['\u{1f6c}', '\u{399}', '\u{0}']),
- ('\u{1fa5}', ['\u{1f6d}', '\u{399}', '\u{0}']),
- ('\u{1fa6}', ['\u{1f6e}', '\u{399}', '\u{0}']),
- ('\u{1fa7}', ['\u{1f6f}', '\u{399}', '\u{0}']),
- ('\u{1fa8}', ['\u{1f68}', '\u{399}', '\u{0}']),
- ('\u{1fa9}', ['\u{1f69}', '\u{399}', '\u{0}']),
- ('\u{1faa}', ['\u{1f6a}', '\u{399}', '\u{0}']),
- ('\u{1fab}', ['\u{1f6b}', '\u{399}', '\u{0}']),
- ('\u{1fac}', ['\u{1f6c}', '\u{399}', '\u{0}']),
- ('\u{1fad}', ['\u{1f6d}', '\u{399}', '\u{0}']),
- ('\u{1fae}', ['\u{1f6e}', '\u{399}', '\u{0}']),
- ('\u{1faf}', ['\u{1f6f}', '\u{399}', '\u{0}']),
- ('\u{1fb0}', ['\u{1fb8}', '\u{0}', '\u{0}']), ('\u{1fb1}', ['\u{1fb9}', '\u{0}', '\u{0}']),
- ('\u{1fb2}', ['\u{1fba}', '\u{399}', '\u{0}']),
- ('\u{1fb3}', ['\u{391}', '\u{399}', '\u{0}']),
- ('\u{1fb4}', ['\u{386}', '\u{399}', '\u{0}']),
- ('\u{1fb6}', ['\u{391}', '\u{342}', '\u{0}']),
- ('\u{1fb7}', ['\u{391}', '\u{342}', '\u{399}']),
- ('\u{1fbc}', ['\u{391}', '\u{399}', '\u{0}']), ('\u{1fbe}', ['\u{399}', '\u{0}', '\u{0}']),
- ('\u{1fc2}', ['\u{1fca}', '\u{399}', '\u{0}']),
- ('\u{1fc3}', ['\u{397}', '\u{399}', '\u{0}']),
- ('\u{1fc4}', ['\u{389}', '\u{399}', '\u{0}']),
- ('\u{1fc6}', ['\u{397}', '\u{342}', '\u{0}']),
- ('\u{1fc7}', ['\u{397}', '\u{342}', '\u{399}']),
- ('\u{1fcc}', ['\u{397}', '\u{399}', '\u{0}']), ('\u{1fd0}', ['\u{1fd8}', '\u{0}', '\u{0}']),
- ('\u{1fd1}', ['\u{1fd9}', '\u{0}', '\u{0}']),
- ('\u{1fd2}', ['\u{399}', '\u{308}', '\u{300}']),
- ('\u{1fd3}', ['\u{399}', '\u{308}', '\u{301}']),
- ('\u{1fd6}', ['\u{399}', '\u{342}', '\u{0}']),
- ('\u{1fd7}', ['\u{399}', '\u{308}', '\u{342}']),
- ('\u{1fe0}', ['\u{1fe8}', '\u{0}', '\u{0}']), ('\u{1fe1}', ['\u{1fe9}', '\u{0}', '\u{0}']),
- ('\u{1fe2}', ['\u{3a5}', '\u{308}', '\u{300}']),
- ('\u{1fe3}', ['\u{3a5}', '\u{308}', '\u{301}']),
- ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\u{0}']), ('\u{1fe5}', ['\u{1fec}', '\u{0}', '\u{0}']),
- ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\u{0}']),
- ('\u{1fe7}', ['\u{3a5}', '\u{308}', '\u{342}']),
- ('\u{1ff2}', ['\u{1ffa}', '\u{399}', '\u{0}']),
- ('\u{1ff3}', ['\u{3a9}', '\u{399}', '\u{0}']),
- ('\u{1ff4}', ['\u{38f}', '\u{399}', '\u{0}']),
- ('\u{1ff6}', ['\u{3a9}', '\u{342}', '\u{0}']),
- ('\u{1ff7}', ['\u{3a9}', '\u{342}', '\u{399}']),
- ('\u{1ffc}', ['\u{3a9}', '\u{399}', '\u{0}']), ('\u{214e}', ['\u{2132}', '\u{0}', '\u{0}']),
- ('\u{2170}', ['\u{2160}', '\u{0}', '\u{0}']), ('\u{2171}', ['\u{2161}', '\u{0}', '\u{0}']),
- ('\u{2172}', ['\u{2162}', '\u{0}', '\u{0}']), ('\u{2173}', ['\u{2163}', '\u{0}', '\u{0}']),
- ('\u{2174}', ['\u{2164}', '\u{0}', '\u{0}']), ('\u{2175}', ['\u{2165}', '\u{0}', '\u{0}']),
- ('\u{2176}', ['\u{2166}', '\u{0}', '\u{0}']), ('\u{2177}', ['\u{2167}', '\u{0}', '\u{0}']),
- ('\u{2178}', ['\u{2168}', '\u{0}', '\u{0}']), ('\u{2179}', ['\u{2169}', '\u{0}', '\u{0}']),
- ('\u{217a}', ['\u{216a}', '\u{0}', '\u{0}']), ('\u{217b}', ['\u{216b}', '\u{0}', '\u{0}']),
- ('\u{217c}', ['\u{216c}', '\u{0}', '\u{0}']), ('\u{217d}', ['\u{216d}', '\u{0}', '\u{0}']),
- ('\u{217e}', ['\u{216e}', '\u{0}', '\u{0}']), ('\u{217f}', ['\u{216f}', '\u{0}', '\u{0}']),
- ('\u{2184}', ['\u{2183}', '\u{0}', '\u{0}']), ('\u{24d0}', ['\u{24b6}', '\u{0}', '\u{0}']),
- ('\u{24d1}', ['\u{24b7}', '\u{0}', '\u{0}']), ('\u{24d2}', ['\u{24b8}', '\u{0}', '\u{0}']),
- ('\u{24d3}', ['\u{24b9}', '\u{0}', '\u{0}']), ('\u{24d4}', ['\u{24ba}', '\u{0}', '\u{0}']),
- ('\u{24d5}', ['\u{24bb}', '\u{0}', '\u{0}']), ('\u{24d6}', ['\u{24bc}', '\u{0}', '\u{0}']),
- ('\u{24d7}', ['\u{24bd}', '\u{0}', '\u{0}']), ('\u{24d8}', ['\u{24be}', '\u{0}', '\u{0}']),
- ('\u{24d9}', ['\u{24bf}', '\u{0}', '\u{0}']), ('\u{24da}', ['\u{24c0}', '\u{0}', '\u{0}']),
- ('\u{24db}', ['\u{24c1}', '\u{0}', '\u{0}']), ('\u{24dc}', ['\u{24c2}', '\u{0}', '\u{0}']),
- ('\u{24dd}', ['\u{24c3}', '\u{0}', '\u{0}']), ('\u{24de}', ['\u{24c4}', '\u{0}', '\u{0}']),
- ('\u{24df}', ['\u{24c5}', '\u{0}', '\u{0}']), ('\u{24e0}', ['\u{24c6}', '\u{0}', '\u{0}']),
- ('\u{24e1}', ['\u{24c7}', '\u{0}', '\u{0}']), ('\u{24e2}', ['\u{24c8}', '\u{0}', '\u{0}']),
- ('\u{24e3}', ['\u{24c9}', '\u{0}', '\u{0}']), ('\u{24e4}', ['\u{24ca}', '\u{0}', '\u{0}']),
- ('\u{24e5}', ['\u{24cb}', '\u{0}', '\u{0}']), ('\u{24e6}', ['\u{24cc}', '\u{0}', '\u{0}']),
- ('\u{24e7}', ['\u{24cd}', '\u{0}', '\u{0}']), ('\u{24e8}', ['\u{24ce}', '\u{0}', '\u{0}']),
- ('\u{24e9}', ['\u{24cf}', '\u{0}', '\u{0}']), ('\u{2c30}', ['\u{2c00}', '\u{0}', '\u{0}']),
- ('\u{2c31}', ['\u{2c01}', '\u{0}', '\u{0}']), ('\u{2c32}', ['\u{2c02}', '\u{0}', '\u{0}']),
- ('\u{2c33}', ['\u{2c03}', '\u{0}', '\u{0}']), ('\u{2c34}', ['\u{2c04}', '\u{0}', '\u{0}']),
- ('\u{2c35}', ['\u{2c05}', '\u{0}', '\u{0}']), ('\u{2c36}', ['\u{2c06}', '\u{0}', '\u{0}']),
- ('\u{2c37}', ['\u{2c07}', '\u{0}', '\u{0}']), ('\u{2c38}', ['\u{2c08}', '\u{0}', '\u{0}']),
- ('\u{2c39}', ['\u{2c09}', '\u{0}', '\u{0}']), ('\u{2c3a}', ['\u{2c0a}', '\u{0}', '\u{0}']),
- ('\u{2c3b}', ['\u{2c0b}', '\u{0}', '\u{0}']), ('\u{2c3c}', ['\u{2c0c}', '\u{0}', '\u{0}']),
- ('\u{2c3d}', ['\u{2c0d}', '\u{0}', '\u{0}']), ('\u{2c3e}', ['\u{2c0e}', '\u{0}', '\u{0}']),
- ('\u{2c3f}', ['\u{2c0f}', '\u{0}', '\u{0}']), ('\u{2c40}', ['\u{2c10}', '\u{0}', '\u{0}']),
- ('\u{2c41}', ['\u{2c11}', '\u{0}', '\u{0}']), ('\u{2c42}', ['\u{2c12}', '\u{0}', '\u{0}']),
- ('\u{2c43}', ['\u{2c13}', '\u{0}', '\u{0}']), ('\u{2c44}', ['\u{2c14}', '\u{0}', '\u{0}']),
- ('\u{2c45}', ['\u{2c15}', '\u{0}', '\u{0}']), ('\u{2c46}', ['\u{2c16}', '\u{0}', '\u{0}']),
- ('\u{2c47}', ['\u{2c17}', '\u{0}', '\u{0}']), ('\u{2c48}', ['\u{2c18}', '\u{0}', '\u{0}']),
- ('\u{2c49}', ['\u{2c19}', '\u{0}', '\u{0}']), ('\u{2c4a}', ['\u{2c1a}', '\u{0}', '\u{0}']),
- ('\u{2c4b}', ['\u{2c1b}', '\u{0}', '\u{0}']), ('\u{2c4c}', ['\u{2c1c}', '\u{0}', '\u{0}']),
- ('\u{2c4d}', ['\u{2c1d}', '\u{0}', '\u{0}']), ('\u{2c4e}', ['\u{2c1e}', '\u{0}', '\u{0}']),
- ('\u{2c4f}', ['\u{2c1f}', '\u{0}', '\u{0}']), ('\u{2c50}', ['\u{2c20}', '\u{0}', '\u{0}']),
- ('\u{2c51}', ['\u{2c21}', '\u{0}', '\u{0}']), ('\u{2c52}', ['\u{2c22}', '\u{0}', '\u{0}']),
- ('\u{2c53}', ['\u{2c23}', '\u{0}', '\u{0}']), ('\u{2c54}', ['\u{2c24}', '\u{0}', '\u{0}']),
- ('\u{2c55}', ['\u{2c25}', '\u{0}', '\u{0}']), ('\u{2c56}', ['\u{2c26}', '\u{0}', '\u{0}']),
- ('\u{2c57}', ['\u{2c27}', '\u{0}', '\u{0}']), ('\u{2c58}', ['\u{2c28}', '\u{0}', '\u{0}']),
- ('\u{2c59}', ['\u{2c29}', '\u{0}', '\u{0}']), ('\u{2c5a}', ['\u{2c2a}', '\u{0}', '\u{0}']),
- ('\u{2c5b}', ['\u{2c2b}', '\u{0}', '\u{0}']), ('\u{2c5c}', ['\u{2c2c}', '\u{0}', '\u{0}']),
- ('\u{2c5d}', ['\u{2c2d}', '\u{0}', '\u{0}']), ('\u{2c5e}', ['\u{2c2e}', '\u{0}', '\u{0}']),
- ('\u{2c5f}', ['\u{2c2f}', '\u{0}', '\u{0}']), ('\u{2c61}', ['\u{2c60}', '\u{0}', '\u{0}']),
- ('\u{2c65}', ['\u{23a}', '\u{0}', '\u{0}']), ('\u{2c66}', ['\u{23e}', '\u{0}', '\u{0}']),
- ('\u{2c68}', ['\u{2c67}', '\u{0}', '\u{0}']), ('\u{2c6a}', ['\u{2c69}', '\u{0}', '\u{0}']),
- ('\u{2c6c}', ['\u{2c6b}', '\u{0}', '\u{0}']), ('\u{2c73}', ['\u{2c72}', '\u{0}', '\u{0}']),
- ('\u{2c76}', ['\u{2c75}', '\u{0}', '\u{0}']), ('\u{2c81}', ['\u{2c80}', '\u{0}', '\u{0}']),
- ('\u{2c83}', ['\u{2c82}', '\u{0}', '\u{0}']), ('\u{2c85}', ['\u{2c84}', '\u{0}', '\u{0}']),
- ('\u{2c87}', ['\u{2c86}', '\u{0}', '\u{0}']), ('\u{2c89}', ['\u{2c88}', '\u{0}', '\u{0}']),
- ('\u{2c8b}', ['\u{2c8a}', '\u{0}', '\u{0}']), ('\u{2c8d}', ['\u{2c8c}', '\u{0}', '\u{0}']),
- ('\u{2c8f}', ['\u{2c8e}', '\u{0}', '\u{0}']), ('\u{2c91}', ['\u{2c90}', '\u{0}', '\u{0}']),
- ('\u{2c93}', ['\u{2c92}', '\u{0}', '\u{0}']), ('\u{2c95}', ['\u{2c94}', '\u{0}', '\u{0}']),
- ('\u{2c97}', ['\u{2c96}', '\u{0}', '\u{0}']), ('\u{2c99}', ['\u{2c98}', '\u{0}', '\u{0}']),
- ('\u{2c9b}', ['\u{2c9a}', '\u{0}', '\u{0}']), ('\u{2c9d}', ['\u{2c9c}', '\u{0}', '\u{0}']),
- ('\u{2c9f}', ['\u{2c9e}', '\u{0}', '\u{0}']), ('\u{2ca1}', ['\u{2ca0}', '\u{0}', '\u{0}']),
- ('\u{2ca3}', ['\u{2ca2}', '\u{0}', '\u{0}']), ('\u{2ca5}', ['\u{2ca4}', '\u{0}', '\u{0}']),
- ('\u{2ca7}', ['\u{2ca6}', '\u{0}', '\u{0}']), ('\u{2ca9}', ['\u{2ca8}', '\u{0}', '\u{0}']),
- ('\u{2cab}', ['\u{2caa}', '\u{0}', '\u{0}']), ('\u{2cad}', ['\u{2cac}', '\u{0}', '\u{0}']),
- ('\u{2caf}', ['\u{2cae}', '\u{0}', '\u{0}']), ('\u{2cb1}', ['\u{2cb0}', '\u{0}', '\u{0}']),
- ('\u{2cb3}', ['\u{2cb2}', '\u{0}', '\u{0}']), ('\u{2cb5}', ['\u{2cb4}', '\u{0}', '\u{0}']),
- ('\u{2cb7}', ['\u{2cb6}', '\u{0}', '\u{0}']), ('\u{2cb9}', ['\u{2cb8}', '\u{0}', '\u{0}']),
- ('\u{2cbb}', ['\u{2cba}', '\u{0}', '\u{0}']), ('\u{2cbd}', ['\u{2cbc}', '\u{0}', '\u{0}']),
- ('\u{2cbf}', ['\u{2cbe}', '\u{0}', '\u{0}']), ('\u{2cc1}', ['\u{2cc0}', '\u{0}', '\u{0}']),
- ('\u{2cc3}', ['\u{2cc2}', '\u{0}', '\u{0}']), ('\u{2cc5}', ['\u{2cc4}', '\u{0}', '\u{0}']),
- ('\u{2cc7}', ['\u{2cc6}', '\u{0}', '\u{0}']), ('\u{2cc9}', ['\u{2cc8}', '\u{0}', '\u{0}']),
- ('\u{2ccb}', ['\u{2cca}', '\u{0}', '\u{0}']), ('\u{2ccd}', ['\u{2ccc}', '\u{0}', '\u{0}']),
- ('\u{2ccf}', ['\u{2cce}', '\u{0}', '\u{0}']), ('\u{2cd1}', ['\u{2cd0}', '\u{0}', '\u{0}']),
- ('\u{2cd3}', ['\u{2cd2}', '\u{0}', '\u{0}']), ('\u{2cd5}', ['\u{2cd4}', '\u{0}', '\u{0}']),
- ('\u{2cd7}', ['\u{2cd6}', '\u{0}', '\u{0}']), ('\u{2cd9}', ['\u{2cd8}', '\u{0}', '\u{0}']),
- ('\u{2cdb}', ['\u{2cda}', '\u{0}', '\u{0}']), ('\u{2cdd}', ['\u{2cdc}', '\u{0}', '\u{0}']),
- ('\u{2cdf}', ['\u{2cde}', '\u{0}', '\u{0}']), ('\u{2ce1}', ['\u{2ce0}', '\u{0}', '\u{0}']),
- ('\u{2ce3}', ['\u{2ce2}', '\u{0}', '\u{0}']), ('\u{2cec}', ['\u{2ceb}', '\u{0}', '\u{0}']),
- ('\u{2cee}', ['\u{2ced}', '\u{0}', '\u{0}']), ('\u{2cf3}', ['\u{2cf2}', '\u{0}', '\u{0}']),
- ('\u{2d00}', ['\u{10a0}', '\u{0}', '\u{0}']), ('\u{2d01}', ['\u{10a1}', '\u{0}', '\u{0}']),
- ('\u{2d02}', ['\u{10a2}', '\u{0}', '\u{0}']), ('\u{2d03}', ['\u{10a3}', '\u{0}', '\u{0}']),
- ('\u{2d04}', ['\u{10a4}', '\u{0}', '\u{0}']), ('\u{2d05}', ['\u{10a5}', '\u{0}', '\u{0}']),
- ('\u{2d06}', ['\u{10a6}', '\u{0}', '\u{0}']), ('\u{2d07}', ['\u{10a7}', '\u{0}', '\u{0}']),
- ('\u{2d08}', ['\u{10a8}', '\u{0}', '\u{0}']), ('\u{2d09}', ['\u{10a9}', '\u{0}', '\u{0}']),
- ('\u{2d0a}', ['\u{10aa}', '\u{0}', '\u{0}']), ('\u{2d0b}', ['\u{10ab}', '\u{0}', '\u{0}']),
- ('\u{2d0c}', ['\u{10ac}', '\u{0}', '\u{0}']), ('\u{2d0d}', ['\u{10ad}', '\u{0}', '\u{0}']),
- ('\u{2d0e}', ['\u{10ae}', '\u{0}', '\u{0}']), ('\u{2d0f}', ['\u{10af}', '\u{0}', '\u{0}']),
- ('\u{2d10}', ['\u{10b0}', '\u{0}', '\u{0}']), ('\u{2d11}', ['\u{10b1}', '\u{0}', '\u{0}']),
- ('\u{2d12}', ['\u{10b2}', '\u{0}', '\u{0}']), ('\u{2d13}', ['\u{10b3}', '\u{0}', '\u{0}']),
- ('\u{2d14}', ['\u{10b4}', '\u{0}', '\u{0}']), ('\u{2d15}', ['\u{10b5}', '\u{0}', '\u{0}']),
- ('\u{2d16}', ['\u{10b6}', '\u{0}', '\u{0}']), ('\u{2d17}', ['\u{10b7}', '\u{0}', '\u{0}']),
- ('\u{2d18}', ['\u{10b8}', '\u{0}', '\u{0}']), ('\u{2d19}', ['\u{10b9}', '\u{0}', '\u{0}']),
- ('\u{2d1a}', ['\u{10ba}', '\u{0}', '\u{0}']), ('\u{2d1b}', ['\u{10bb}', '\u{0}', '\u{0}']),
- ('\u{2d1c}', ['\u{10bc}', '\u{0}', '\u{0}']), ('\u{2d1d}', ['\u{10bd}', '\u{0}', '\u{0}']),
- ('\u{2d1e}', ['\u{10be}', '\u{0}', '\u{0}']), ('\u{2d1f}', ['\u{10bf}', '\u{0}', '\u{0}']),
- ('\u{2d20}', ['\u{10c0}', '\u{0}', '\u{0}']), ('\u{2d21}', ['\u{10c1}', '\u{0}', '\u{0}']),
- ('\u{2d22}', ['\u{10c2}', '\u{0}', '\u{0}']), ('\u{2d23}', ['\u{10c3}', '\u{0}', '\u{0}']),
- ('\u{2d24}', ['\u{10c4}', '\u{0}', '\u{0}']), ('\u{2d25}', ['\u{10c5}', '\u{0}', '\u{0}']),
- ('\u{2d27}', ['\u{10c7}', '\u{0}', '\u{0}']), ('\u{2d2d}', ['\u{10cd}', '\u{0}', '\u{0}']),
- ('\u{a641}', ['\u{a640}', '\u{0}', '\u{0}']), ('\u{a643}', ['\u{a642}', '\u{0}', '\u{0}']),
- ('\u{a645}', ['\u{a644}', '\u{0}', '\u{0}']), ('\u{a647}', ['\u{a646}', '\u{0}', '\u{0}']),
- ('\u{a649}', ['\u{a648}', '\u{0}', '\u{0}']), ('\u{a64b}', ['\u{a64a}', '\u{0}', '\u{0}']),
- ('\u{a64d}', ['\u{a64c}', '\u{0}', '\u{0}']), ('\u{a64f}', ['\u{a64e}', '\u{0}', '\u{0}']),
- ('\u{a651}', ['\u{a650}', '\u{0}', '\u{0}']), ('\u{a653}', ['\u{a652}', '\u{0}', '\u{0}']),
- ('\u{a655}', ['\u{a654}', '\u{0}', '\u{0}']), ('\u{a657}', ['\u{a656}', '\u{0}', '\u{0}']),
- ('\u{a659}', ['\u{a658}', '\u{0}', '\u{0}']), ('\u{a65b}', ['\u{a65a}', '\u{0}', '\u{0}']),
- ('\u{a65d}', ['\u{a65c}', '\u{0}', '\u{0}']), ('\u{a65f}', ['\u{a65e}', '\u{0}', '\u{0}']),
- ('\u{a661}', ['\u{a660}', '\u{0}', '\u{0}']), ('\u{a663}', ['\u{a662}', '\u{0}', '\u{0}']),
- ('\u{a665}', ['\u{a664}', '\u{0}', '\u{0}']), ('\u{a667}', ['\u{a666}', '\u{0}', '\u{0}']),
- ('\u{a669}', ['\u{a668}', '\u{0}', '\u{0}']), ('\u{a66b}', ['\u{a66a}', '\u{0}', '\u{0}']),
- ('\u{a66d}', ['\u{a66c}', '\u{0}', '\u{0}']), ('\u{a681}', ['\u{a680}', '\u{0}', '\u{0}']),
- ('\u{a683}', ['\u{a682}', '\u{0}', '\u{0}']), ('\u{a685}', ['\u{a684}', '\u{0}', '\u{0}']),
- ('\u{a687}', ['\u{a686}', '\u{0}', '\u{0}']), ('\u{a689}', ['\u{a688}', '\u{0}', '\u{0}']),
- ('\u{a68b}', ['\u{a68a}', '\u{0}', '\u{0}']), ('\u{a68d}', ['\u{a68c}', '\u{0}', '\u{0}']),
- ('\u{a68f}', ['\u{a68e}', '\u{0}', '\u{0}']), ('\u{a691}', ['\u{a690}', '\u{0}', '\u{0}']),
- ('\u{a693}', ['\u{a692}', '\u{0}', '\u{0}']), ('\u{a695}', ['\u{a694}', '\u{0}', '\u{0}']),
- ('\u{a697}', ['\u{a696}', '\u{0}', '\u{0}']), ('\u{a699}', ['\u{a698}', '\u{0}', '\u{0}']),
- ('\u{a69b}', ['\u{a69a}', '\u{0}', '\u{0}']), ('\u{a723}', ['\u{a722}', '\u{0}', '\u{0}']),
- ('\u{a725}', ['\u{a724}', '\u{0}', '\u{0}']), ('\u{a727}', ['\u{a726}', '\u{0}', '\u{0}']),
- ('\u{a729}', ['\u{a728}', '\u{0}', '\u{0}']), ('\u{a72b}', ['\u{a72a}', '\u{0}', '\u{0}']),
- ('\u{a72d}', ['\u{a72c}', '\u{0}', '\u{0}']), ('\u{a72f}', ['\u{a72e}', '\u{0}', '\u{0}']),
- ('\u{a733}', ['\u{a732}', '\u{0}', '\u{0}']), ('\u{a735}', ['\u{a734}', '\u{0}', '\u{0}']),
- ('\u{a737}', ['\u{a736}', '\u{0}', '\u{0}']), ('\u{a739}', ['\u{a738}', '\u{0}', '\u{0}']),
- ('\u{a73b}', ['\u{a73a}', '\u{0}', '\u{0}']), ('\u{a73d}', ['\u{a73c}', '\u{0}', '\u{0}']),
- ('\u{a73f}', ['\u{a73e}', '\u{0}', '\u{0}']), ('\u{a741}', ['\u{a740}', '\u{0}', '\u{0}']),
- ('\u{a743}', ['\u{a742}', '\u{0}', '\u{0}']), ('\u{a745}', ['\u{a744}', '\u{0}', '\u{0}']),
- ('\u{a747}', ['\u{a746}', '\u{0}', '\u{0}']), ('\u{a749}', ['\u{a748}', '\u{0}', '\u{0}']),
- ('\u{a74b}', ['\u{a74a}', '\u{0}', '\u{0}']), ('\u{a74d}', ['\u{a74c}', '\u{0}', '\u{0}']),
- ('\u{a74f}', ['\u{a74e}', '\u{0}', '\u{0}']), ('\u{a751}', ['\u{a750}', '\u{0}', '\u{0}']),
- ('\u{a753}', ['\u{a752}', '\u{0}', '\u{0}']), ('\u{a755}', ['\u{a754}', '\u{0}', '\u{0}']),
- ('\u{a757}', ['\u{a756}', '\u{0}', '\u{0}']), ('\u{a759}', ['\u{a758}', '\u{0}', '\u{0}']),
- ('\u{a75b}', ['\u{a75a}', '\u{0}', '\u{0}']), ('\u{a75d}', ['\u{a75c}', '\u{0}', '\u{0}']),
- ('\u{a75f}', ['\u{a75e}', '\u{0}', '\u{0}']), ('\u{a761}', ['\u{a760}', '\u{0}', '\u{0}']),
- ('\u{a763}', ['\u{a762}', '\u{0}', '\u{0}']), ('\u{a765}', ['\u{a764}', '\u{0}', '\u{0}']),
- ('\u{a767}', ['\u{a766}', '\u{0}', '\u{0}']), ('\u{a769}', ['\u{a768}', '\u{0}', '\u{0}']),
- ('\u{a76b}', ['\u{a76a}', '\u{0}', '\u{0}']), ('\u{a76d}', ['\u{a76c}', '\u{0}', '\u{0}']),
- ('\u{a76f}', ['\u{a76e}', '\u{0}', '\u{0}']), ('\u{a77a}', ['\u{a779}', '\u{0}', '\u{0}']),
- ('\u{a77c}', ['\u{a77b}', '\u{0}', '\u{0}']), ('\u{a77f}', ['\u{a77e}', '\u{0}', '\u{0}']),
- ('\u{a781}', ['\u{a780}', '\u{0}', '\u{0}']), ('\u{a783}', ['\u{a782}', '\u{0}', '\u{0}']),
- ('\u{a785}', ['\u{a784}', '\u{0}', '\u{0}']), ('\u{a787}', ['\u{a786}', '\u{0}', '\u{0}']),
- ('\u{a78c}', ['\u{a78b}', '\u{0}', '\u{0}']), ('\u{a791}', ['\u{a790}', '\u{0}', '\u{0}']),
- ('\u{a793}', ['\u{a792}', '\u{0}', '\u{0}']), ('\u{a794}', ['\u{a7c4}', '\u{0}', '\u{0}']),
- ('\u{a797}', ['\u{a796}', '\u{0}', '\u{0}']), ('\u{a799}', ['\u{a798}', '\u{0}', '\u{0}']),
- ('\u{a79b}', ['\u{a79a}', '\u{0}', '\u{0}']), ('\u{a79d}', ['\u{a79c}', '\u{0}', '\u{0}']),
- ('\u{a79f}', ['\u{a79e}', '\u{0}', '\u{0}']), ('\u{a7a1}', ['\u{a7a0}', '\u{0}', '\u{0}']),
- ('\u{a7a3}', ['\u{a7a2}', '\u{0}', '\u{0}']), ('\u{a7a5}', ['\u{a7a4}', '\u{0}', '\u{0}']),
- ('\u{a7a7}', ['\u{a7a6}', '\u{0}', '\u{0}']), ('\u{a7a9}', ['\u{a7a8}', '\u{0}', '\u{0}']),
- ('\u{a7b5}', ['\u{a7b4}', '\u{0}', '\u{0}']), ('\u{a7b7}', ['\u{a7b6}', '\u{0}', '\u{0}']),
- ('\u{a7b9}', ['\u{a7b8}', '\u{0}', '\u{0}']), ('\u{a7bb}', ['\u{a7ba}', '\u{0}', '\u{0}']),
- ('\u{a7bd}', ['\u{a7bc}', '\u{0}', '\u{0}']), ('\u{a7bf}', ['\u{a7be}', '\u{0}', '\u{0}']),
- ('\u{a7c1}', ['\u{a7c0}', '\u{0}', '\u{0}']), ('\u{a7c3}', ['\u{a7c2}', '\u{0}', '\u{0}']),
- ('\u{a7c8}', ['\u{a7c7}', '\u{0}', '\u{0}']), ('\u{a7ca}', ['\u{a7c9}', '\u{0}', '\u{0}']),
- ('\u{a7d1}', ['\u{a7d0}', '\u{0}', '\u{0}']), ('\u{a7d7}', ['\u{a7d6}', '\u{0}', '\u{0}']),
- ('\u{a7d9}', ['\u{a7d8}', '\u{0}', '\u{0}']), ('\u{a7f6}', ['\u{a7f5}', '\u{0}', '\u{0}']),
- ('\u{ab53}', ['\u{a7b3}', '\u{0}', '\u{0}']), ('\u{ab70}', ['\u{13a0}', '\u{0}', '\u{0}']),
- ('\u{ab71}', ['\u{13a1}', '\u{0}', '\u{0}']), ('\u{ab72}', ['\u{13a2}', '\u{0}', '\u{0}']),
- ('\u{ab73}', ['\u{13a3}', '\u{0}', '\u{0}']), ('\u{ab74}', ['\u{13a4}', '\u{0}', '\u{0}']),
- ('\u{ab75}', ['\u{13a5}', '\u{0}', '\u{0}']), ('\u{ab76}', ['\u{13a6}', '\u{0}', '\u{0}']),
- ('\u{ab77}', ['\u{13a7}', '\u{0}', '\u{0}']), ('\u{ab78}', ['\u{13a8}', '\u{0}', '\u{0}']),
- ('\u{ab79}', ['\u{13a9}', '\u{0}', '\u{0}']), ('\u{ab7a}', ['\u{13aa}', '\u{0}', '\u{0}']),
- ('\u{ab7b}', ['\u{13ab}', '\u{0}', '\u{0}']), ('\u{ab7c}', ['\u{13ac}', '\u{0}', '\u{0}']),
- ('\u{ab7d}', ['\u{13ad}', '\u{0}', '\u{0}']), ('\u{ab7e}', ['\u{13ae}', '\u{0}', '\u{0}']),
- ('\u{ab7f}', ['\u{13af}', '\u{0}', '\u{0}']), ('\u{ab80}', ['\u{13b0}', '\u{0}', '\u{0}']),
- ('\u{ab81}', ['\u{13b1}', '\u{0}', '\u{0}']), ('\u{ab82}', ['\u{13b2}', '\u{0}', '\u{0}']),
- ('\u{ab83}', ['\u{13b3}', '\u{0}', '\u{0}']), ('\u{ab84}', ['\u{13b4}', '\u{0}', '\u{0}']),
- ('\u{ab85}', ['\u{13b5}', '\u{0}', '\u{0}']), ('\u{ab86}', ['\u{13b6}', '\u{0}', '\u{0}']),
- ('\u{ab87}', ['\u{13b7}', '\u{0}', '\u{0}']), ('\u{ab88}', ['\u{13b8}', '\u{0}', '\u{0}']),
- ('\u{ab89}', ['\u{13b9}', '\u{0}', '\u{0}']), ('\u{ab8a}', ['\u{13ba}', '\u{0}', '\u{0}']),
- ('\u{ab8b}', ['\u{13bb}', '\u{0}', '\u{0}']), ('\u{ab8c}', ['\u{13bc}', '\u{0}', '\u{0}']),
- ('\u{ab8d}', ['\u{13bd}', '\u{0}', '\u{0}']), ('\u{ab8e}', ['\u{13be}', '\u{0}', '\u{0}']),
- ('\u{ab8f}', ['\u{13bf}', '\u{0}', '\u{0}']), ('\u{ab90}', ['\u{13c0}', '\u{0}', '\u{0}']),
- ('\u{ab91}', ['\u{13c1}', '\u{0}', '\u{0}']), ('\u{ab92}', ['\u{13c2}', '\u{0}', '\u{0}']),
- ('\u{ab93}', ['\u{13c3}', '\u{0}', '\u{0}']), ('\u{ab94}', ['\u{13c4}', '\u{0}', '\u{0}']),
- ('\u{ab95}', ['\u{13c5}', '\u{0}', '\u{0}']), ('\u{ab96}', ['\u{13c6}', '\u{0}', '\u{0}']),
- ('\u{ab97}', ['\u{13c7}', '\u{0}', '\u{0}']), ('\u{ab98}', ['\u{13c8}', '\u{0}', '\u{0}']),
- ('\u{ab99}', ['\u{13c9}', '\u{0}', '\u{0}']), ('\u{ab9a}', ['\u{13ca}', '\u{0}', '\u{0}']),
- ('\u{ab9b}', ['\u{13cb}', '\u{0}', '\u{0}']), ('\u{ab9c}', ['\u{13cc}', '\u{0}', '\u{0}']),
- ('\u{ab9d}', ['\u{13cd}', '\u{0}', '\u{0}']), ('\u{ab9e}', ['\u{13ce}', '\u{0}', '\u{0}']),
- ('\u{ab9f}', ['\u{13cf}', '\u{0}', '\u{0}']), ('\u{aba0}', ['\u{13d0}', '\u{0}', '\u{0}']),
- ('\u{aba1}', ['\u{13d1}', '\u{0}', '\u{0}']), ('\u{aba2}', ['\u{13d2}', '\u{0}', '\u{0}']),
- ('\u{aba3}', ['\u{13d3}', '\u{0}', '\u{0}']), ('\u{aba4}', ['\u{13d4}', '\u{0}', '\u{0}']),
- ('\u{aba5}', ['\u{13d5}', '\u{0}', '\u{0}']), ('\u{aba6}', ['\u{13d6}', '\u{0}', '\u{0}']),
- ('\u{aba7}', ['\u{13d7}', '\u{0}', '\u{0}']), ('\u{aba8}', ['\u{13d8}', '\u{0}', '\u{0}']),
- ('\u{aba9}', ['\u{13d9}', '\u{0}', '\u{0}']), ('\u{abaa}', ['\u{13da}', '\u{0}', '\u{0}']),
- ('\u{abab}', ['\u{13db}', '\u{0}', '\u{0}']), ('\u{abac}', ['\u{13dc}', '\u{0}', '\u{0}']),
- ('\u{abad}', ['\u{13dd}', '\u{0}', '\u{0}']), ('\u{abae}', ['\u{13de}', '\u{0}', '\u{0}']),
- ('\u{abaf}', ['\u{13df}', '\u{0}', '\u{0}']), ('\u{abb0}', ['\u{13e0}', '\u{0}', '\u{0}']),
- ('\u{abb1}', ['\u{13e1}', '\u{0}', '\u{0}']), ('\u{abb2}', ['\u{13e2}', '\u{0}', '\u{0}']),
- ('\u{abb3}', ['\u{13e3}', '\u{0}', '\u{0}']), ('\u{abb4}', ['\u{13e4}', '\u{0}', '\u{0}']),
- ('\u{abb5}', ['\u{13e5}', '\u{0}', '\u{0}']), ('\u{abb6}', ['\u{13e6}', '\u{0}', '\u{0}']),
- ('\u{abb7}', ['\u{13e7}', '\u{0}', '\u{0}']), ('\u{abb8}', ['\u{13e8}', '\u{0}', '\u{0}']),
- ('\u{abb9}', ['\u{13e9}', '\u{0}', '\u{0}']), ('\u{abba}', ['\u{13ea}', '\u{0}', '\u{0}']),
- ('\u{abbb}', ['\u{13eb}', '\u{0}', '\u{0}']), ('\u{abbc}', ['\u{13ec}', '\u{0}', '\u{0}']),
- ('\u{abbd}', ['\u{13ed}', '\u{0}', '\u{0}']), ('\u{abbe}', ['\u{13ee}', '\u{0}', '\u{0}']),
- ('\u{abbf}', ['\u{13ef}', '\u{0}', '\u{0}']), ('\u{fb00}', ['F', 'F', '\u{0}']),
- ('\u{fb01}', ['F', 'I', '\u{0}']), ('\u{fb02}', ['F', 'L', '\u{0}']),
- ('\u{fb03}', ['F', 'F', 'I']), ('\u{fb04}', ['F', 'F', 'L']),
- ('\u{fb05}', ['S', 'T', '\u{0}']), ('\u{fb06}', ['S', 'T', '\u{0}']),
- ('\u{fb13}', ['\u{544}', '\u{546}', '\u{0}']),
- ('\u{fb14}', ['\u{544}', '\u{535}', '\u{0}']),
- ('\u{fb15}', ['\u{544}', '\u{53b}', '\u{0}']),
- ('\u{fb16}', ['\u{54e}', '\u{546}', '\u{0}']),
- ('\u{fb17}', ['\u{544}', '\u{53d}', '\u{0}']), ('\u{ff41}', ['\u{ff21}', '\u{0}', '\u{0}']),
- ('\u{ff42}', ['\u{ff22}', '\u{0}', '\u{0}']), ('\u{ff43}', ['\u{ff23}', '\u{0}', '\u{0}']),
- ('\u{ff44}', ['\u{ff24}', '\u{0}', '\u{0}']), ('\u{ff45}', ['\u{ff25}', '\u{0}', '\u{0}']),
- ('\u{ff46}', ['\u{ff26}', '\u{0}', '\u{0}']), ('\u{ff47}', ['\u{ff27}', '\u{0}', '\u{0}']),
- ('\u{ff48}', ['\u{ff28}', '\u{0}', '\u{0}']), ('\u{ff49}', ['\u{ff29}', '\u{0}', '\u{0}']),
- ('\u{ff4a}', ['\u{ff2a}', '\u{0}', '\u{0}']), ('\u{ff4b}', ['\u{ff2b}', '\u{0}', '\u{0}']),
- ('\u{ff4c}', ['\u{ff2c}', '\u{0}', '\u{0}']), ('\u{ff4d}', ['\u{ff2d}', '\u{0}', '\u{0}']),
- ('\u{ff4e}', ['\u{ff2e}', '\u{0}', '\u{0}']), ('\u{ff4f}', ['\u{ff2f}', '\u{0}', '\u{0}']),
- ('\u{ff50}', ['\u{ff30}', '\u{0}', '\u{0}']), ('\u{ff51}', ['\u{ff31}', '\u{0}', '\u{0}']),
- ('\u{ff52}', ['\u{ff32}', '\u{0}', '\u{0}']), ('\u{ff53}', ['\u{ff33}', '\u{0}', '\u{0}']),
- ('\u{ff54}', ['\u{ff34}', '\u{0}', '\u{0}']), ('\u{ff55}', ['\u{ff35}', '\u{0}', '\u{0}']),
- ('\u{ff56}', ['\u{ff36}', '\u{0}', '\u{0}']), ('\u{ff57}', ['\u{ff37}', '\u{0}', '\u{0}']),
- ('\u{ff58}', ['\u{ff38}', '\u{0}', '\u{0}']), ('\u{ff59}', ['\u{ff39}', '\u{0}', '\u{0}']),
- ('\u{ff5a}', ['\u{ff3a}', '\u{0}', '\u{0}']),
- ('\u{10428}', ['\u{10400}', '\u{0}', '\u{0}']),
- ('\u{10429}', ['\u{10401}', '\u{0}', '\u{0}']),
- ('\u{1042a}', ['\u{10402}', '\u{0}', '\u{0}']),
- ('\u{1042b}', ['\u{10403}', '\u{0}', '\u{0}']),
- ('\u{1042c}', ['\u{10404}', '\u{0}', '\u{0}']),
- ('\u{1042d}', ['\u{10405}', '\u{0}', '\u{0}']),
- ('\u{1042e}', ['\u{10406}', '\u{0}', '\u{0}']),
- ('\u{1042f}', ['\u{10407}', '\u{0}', '\u{0}']),
- ('\u{10430}', ['\u{10408}', '\u{0}', '\u{0}']),
- ('\u{10431}', ['\u{10409}', '\u{0}', '\u{0}']),
- ('\u{10432}', ['\u{1040a}', '\u{0}', '\u{0}']),
- ('\u{10433}', ['\u{1040b}', '\u{0}', '\u{0}']),
- ('\u{10434}', ['\u{1040c}', '\u{0}', '\u{0}']),
- ('\u{10435}', ['\u{1040d}', '\u{0}', '\u{0}']),
- ('\u{10436}', ['\u{1040e}', '\u{0}', '\u{0}']),
- ('\u{10437}', ['\u{1040f}', '\u{0}', '\u{0}']),
- ('\u{10438}', ['\u{10410}', '\u{0}', '\u{0}']),
- ('\u{10439}', ['\u{10411}', '\u{0}', '\u{0}']),
- ('\u{1043a}', ['\u{10412}', '\u{0}', '\u{0}']),
- ('\u{1043b}', ['\u{10413}', '\u{0}', '\u{0}']),
- ('\u{1043c}', ['\u{10414}', '\u{0}', '\u{0}']),
- ('\u{1043d}', ['\u{10415}', '\u{0}', '\u{0}']),
- ('\u{1043e}', ['\u{10416}', '\u{0}', '\u{0}']),
- ('\u{1043f}', ['\u{10417}', '\u{0}', '\u{0}']),
- ('\u{10440}', ['\u{10418}', '\u{0}', '\u{0}']),
- ('\u{10441}', ['\u{10419}', '\u{0}', '\u{0}']),
- ('\u{10442}', ['\u{1041a}', '\u{0}', '\u{0}']),
- ('\u{10443}', ['\u{1041b}', '\u{0}', '\u{0}']),
- ('\u{10444}', ['\u{1041c}', '\u{0}', '\u{0}']),
- ('\u{10445}', ['\u{1041d}', '\u{0}', '\u{0}']),
- ('\u{10446}', ['\u{1041e}', '\u{0}', '\u{0}']),
- ('\u{10447}', ['\u{1041f}', '\u{0}', '\u{0}']),
- ('\u{10448}', ['\u{10420}', '\u{0}', '\u{0}']),
- ('\u{10449}', ['\u{10421}', '\u{0}', '\u{0}']),
- ('\u{1044a}', ['\u{10422}', '\u{0}', '\u{0}']),
- ('\u{1044b}', ['\u{10423}', '\u{0}', '\u{0}']),
- ('\u{1044c}', ['\u{10424}', '\u{0}', '\u{0}']),
- ('\u{1044d}', ['\u{10425}', '\u{0}', '\u{0}']),
- ('\u{1044e}', ['\u{10426}', '\u{0}', '\u{0}']),
- ('\u{1044f}', ['\u{10427}', '\u{0}', '\u{0}']),
- ('\u{104d8}', ['\u{104b0}', '\u{0}', '\u{0}']),
- ('\u{104d9}', ['\u{104b1}', '\u{0}', '\u{0}']),
- ('\u{104da}', ['\u{104b2}', '\u{0}', '\u{0}']),
- ('\u{104db}', ['\u{104b3}', '\u{0}', '\u{0}']),
- ('\u{104dc}', ['\u{104b4}', '\u{0}', '\u{0}']),
- ('\u{104dd}', ['\u{104b5}', '\u{0}', '\u{0}']),
- ('\u{104de}', ['\u{104b6}', '\u{0}', '\u{0}']),
- ('\u{104df}', ['\u{104b7}', '\u{0}', '\u{0}']),
- ('\u{104e0}', ['\u{104b8}', '\u{0}', '\u{0}']),
- ('\u{104e1}', ['\u{104b9}', '\u{0}', '\u{0}']),
- ('\u{104e2}', ['\u{104ba}', '\u{0}', '\u{0}']),
- ('\u{104e3}', ['\u{104bb}', '\u{0}', '\u{0}']),
- ('\u{104e4}', ['\u{104bc}', '\u{0}', '\u{0}']),
- ('\u{104e5}', ['\u{104bd}', '\u{0}', '\u{0}']),
- ('\u{104e6}', ['\u{104be}', '\u{0}', '\u{0}']),
- ('\u{104e7}', ['\u{104bf}', '\u{0}', '\u{0}']),
- ('\u{104e8}', ['\u{104c0}', '\u{0}', '\u{0}']),
- ('\u{104e9}', ['\u{104c1}', '\u{0}', '\u{0}']),
- ('\u{104ea}', ['\u{104c2}', '\u{0}', '\u{0}']),
- ('\u{104eb}', ['\u{104c3}', '\u{0}', '\u{0}']),
- ('\u{104ec}', ['\u{104c4}', '\u{0}', '\u{0}']),
- ('\u{104ed}', ['\u{104c5}', '\u{0}', '\u{0}']),
- ('\u{104ee}', ['\u{104c6}', '\u{0}', '\u{0}']),
- ('\u{104ef}', ['\u{104c7}', '\u{0}', '\u{0}']),
- ('\u{104f0}', ['\u{104c8}', '\u{0}', '\u{0}']),
- ('\u{104f1}', ['\u{104c9}', '\u{0}', '\u{0}']),
- ('\u{104f2}', ['\u{104ca}', '\u{0}', '\u{0}']),
- ('\u{104f3}', ['\u{104cb}', '\u{0}', '\u{0}']),
- ('\u{104f4}', ['\u{104cc}', '\u{0}', '\u{0}']),
- ('\u{104f5}', ['\u{104cd}', '\u{0}', '\u{0}']),
- ('\u{104f6}', ['\u{104ce}', '\u{0}', '\u{0}']),
- ('\u{104f7}', ['\u{104cf}', '\u{0}', '\u{0}']),
- ('\u{104f8}', ['\u{104d0}', '\u{0}', '\u{0}']),
- ('\u{104f9}', ['\u{104d1}', '\u{0}', '\u{0}']),
- ('\u{104fa}', ['\u{104d2}', '\u{0}', '\u{0}']),
- ('\u{104fb}', ['\u{104d3}', '\u{0}', '\u{0}']),
- ('\u{10597}', ['\u{10570}', '\u{0}', '\u{0}']),
- ('\u{10598}', ['\u{10571}', '\u{0}', '\u{0}']),
- ('\u{10599}', ['\u{10572}', '\u{0}', '\u{0}']),
- ('\u{1059a}', ['\u{10573}', '\u{0}', '\u{0}']),
- ('\u{1059b}', ['\u{10574}', '\u{0}', '\u{0}']),
- ('\u{1059c}', ['\u{10575}', '\u{0}', '\u{0}']),
- ('\u{1059d}', ['\u{10576}', '\u{0}', '\u{0}']),
- ('\u{1059e}', ['\u{10577}', '\u{0}', '\u{0}']),
- ('\u{1059f}', ['\u{10578}', '\u{0}', '\u{0}']),
- ('\u{105a0}', ['\u{10579}', '\u{0}', '\u{0}']),
- ('\u{105a1}', ['\u{1057a}', '\u{0}', '\u{0}']),
- ('\u{105a3}', ['\u{1057c}', '\u{0}', '\u{0}']),
- ('\u{105a4}', ['\u{1057d}', '\u{0}', '\u{0}']),
- ('\u{105a5}', ['\u{1057e}', '\u{0}', '\u{0}']),
- ('\u{105a6}', ['\u{1057f}', '\u{0}', '\u{0}']),
- ('\u{105a7}', ['\u{10580}', '\u{0}', '\u{0}']),
- ('\u{105a8}', ['\u{10581}', '\u{0}', '\u{0}']),
- ('\u{105a9}', ['\u{10582}', '\u{0}', '\u{0}']),
- ('\u{105aa}', ['\u{10583}', '\u{0}', '\u{0}']),
- ('\u{105ab}', ['\u{10584}', '\u{0}', '\u{0}']),
- ('\u{105ac}', ['\u{10585}', '\u{0}', '\u{0}']),
- ('\u{105ad}', ['\u{10586}', '\u{0}', '\u{0}']),
- ('\u{105ae}', ['\u{10587}', '\u{0}', '\u{0}']),
- ('\u{105af}', ['\u{10588}', '\u{0}', '\u{0}']),
- ('\u{105b0}', ['\u{10589}', '\u{0}', '\u{0}']),
- ('\u{105b1}', ['\u{1058a}', '\u{0}', '\u{0}']),
- ('\u{105b3}', ['\u{1058c}', '\u{0}', '\u{0}']),
- ('\u{105b4}', ['\u{1058d}', '\u{0}', '\u{0}']),
- ('\u{105b5}', ['\u{1058e}', '\u{0}', '\u{0}']),
- ('\u{105b6}', ['\u{1058f}', '\u{0}', '\u{0}']),
- ('\u{105b7}', ['\u{10590}', '\u{0}', '\u{0}']),
- ('\u{105b8}', ['\u{10591}', '\u{0}', '\u{0}']),
- ('\u{105b9}', ['\u{10592}', '\u{0}', '\u{0}']),
- ('\u{105bb}', ['\u{10594}', '\u{0}', '\u{0}']),
- ('\u{105bc}', ['\u{10595}', '\u{0}', '\u{0}']),
- ('\u{10cc0}', ['\u{10c80}', '\u{0}', '\u{0}']),
- ('\u{10cc1}', ['\u{10c81}', '\u{0}', '\u{0}']),
- ('\u{10cc2}', ['\u{10c82}', '\u{0}', '\u{0}']),
- ('\u{10cc3}', ['\u{10c83}', '\u{0}', '\u{0}']),
- ('\u{10cc4}', ['\u{10c84}', '\u{0}', '\u{0}']),
- ('\u{10cc5}', ['\u{10c85}', '\u{0}', '\u{0}']),
- ('\u{10cc6}', ['\u{10c86}', '\u{0}', '\u{0}']),
- ('\u{10cc7}', ['\u{10c87}', '\u{0}', '\u{0}']),
- ('\u{10cc8}', ['\u{10c88}', '\u{0}', '\u{0}']),
- ('\u{10cc9}', ['\u{10c89}', '\u{0}', '\u{0}']),
- ('\u{10cca}', ['\u{10c8a}', '\u{0}', '\u{0}']),
- ('\u{10ccb}', ['\u{10c8b}', '\u{0}', '\u{0}']),
- ('\u{10ccc}', ['\u{10c8c}', '\u{0}', '\u{0}']),
- ('\u{10ccd}', ['\u{10c8d}', '\u{0}', '\u{0}']),
- ('\u{10cce}', ['\u{10c8e}', '\u{0}', '\u{0}']),
- ('\u{10ccf}', ['\u{10c8f}', '\u{0}', '\u{0}']),
- ('\u{10cd0}', ['\u{10c90}', '\u{0}', '\u{0}']),
- ('\u{10cd1}', ['\u{10c91}', '\u{0}', '\u{0}']),
- ('\u{10cd2}', ['\u{10c92}', '\u{0}', '\u{0}']),
- ('\u{10cd3}', ['\u{10c93}', '\u{0}', '\u{0}']),
- ('\u{10cd4}', ['\u{10c94}', '\u{0}', '\u{0}']),
- ('\u{10cd5}', ['\u{10c95}', '\u{0}', '\u{0}']),
- ('\u{10cd6}', ['\u{10c96}', '\u{0}', '\u{0}']),
- ('\u{10cd7}', ['\u{10c97}', '\u{0}', '\u{0}']),
- ('\u{10cd8}', ['\u{10c98}', '\u{0}', '\u{0}']),
- ('\u{10cd9}', ['\u{10c99}', '\u{0}', '\u{0}']),
- ('\u{10cda}', ['\u{10c9a}', '\u{0}', '\u{0}']),
- ('\u{10cdb}', ['\u{10c9b}', '\u{0}', '\u{0}']),
- ('\u{10cdc}', ['\u{10c9c}', '\u{0}', '\u{0}']),
- ('\u{10cdd}', ['\u{10c9d}', '\u{0}', '\u{0}']),
- ('\u{10cde}', ['\u{10c9e}', '\u{0}', '\u{0}']),
- ('\u{10cdf}', ['\u{10c9f}', '\u{0}', '\u{0}']),
- ('\u{10ce0}', ['\u{10ca0}', '\u{0}', '\u{0}']),
- ('\u{10ce1}', ['\u{10ca1}', '\u{0}', '\u{0}']),
- ('\u{10ce2}', ['\u{10ca2}', '\u{0}', '\u{0}']),
- ('\u{10ce3}', ['\u{10ca3}', '\u{0}', '\u{0}']),
- ('\u{10ce4}', ['\u{10ca4}', '\u{0}', '\u{0}']),
- ('\u{10ce5}', ['\u{10ca5}', '\u{0}', '\u{0}']),
- ('\u{10ce6}', ['\u{10ca6}', '\u{0}', '\u{0}']),
- ('\u{10ce7}', ['\u{10ca7}', '\u{0}', '\u{0}']),
- ('\u{10ce8}', ['\u{10ca8}', '\u{0}', '\u{0}']),
- ('\u{10ce9}', ['\u{10ca9}', '\u{0}', '\u{0}']),
- ('\u{10cea}', ['\u{10caa}', '\u{0}', '\u{0}']),
- ('\u{10ceb}', ['\u{10cab}', '\u{0}', '\u{0}']),
- ('\u{10cec}', ['\u{10cac}', '\u{0}', '\u{0}']),
- ('\u{10ced}', ['\u{10cad}', '\u{0}', '\u{0}']),
- ('\u{10cee}', ['\u{10cae}', '\u{0}', '\u{0}']),
- ('\u{10cef}', ['\u{10caf}', '\u{0}', '\u{0}']),
- ('\u{10cf0}', ['\u{10cb0}', '\u{0}', '\u{0}']),
- ('\u{10cf1}', ['\u{10cb1}', '\u{0}', '\u{0}']),
- ('\u{10cf2}', ['\u{10cb2}', '\u{0}', '\u{0}']),
- ('\u{118c0}', ['\u{118a0}', '\u{0}', '\u{0}']),
- ('\u{118c1}', ['\u{118a1}', '\u{0}', '\u{0}']),
- ('\u{118c2}', ['\u{118a2}', '\u{0}', '\u{0}']),
- ('\u{118c3}', ['\u{118a3}', '\u{0}', '\u{0}']),
- ('\u{118c4}', ['\u{118a4}', '\u{0}', '\u{0}']),
- ('\u{118c5}', ['\u{118a5}', '\u{0}', '\u{0}']),
- ('\u{118c6}', ['\u{118a6}', '\u{0}', '\u{0}']),
- ('\u{118c7}', ['\u{118a7}', '\u{0}', '\u{0}']),
- ('\u{118c8}', ['\u{118a8}', '\u{0}', '\u{0}']),
- ('\u{118c9}', ['\u{118a9}', '\u{0}', '\u{0}']),
- ('\u{118ca}', ['\u{118aa}', '\u{0}', '\u{0}']),
- ('\u{118cb}', ['\u{118ab}', '\u{0}', '\u{0}']),
- ('\u{118cc}', ['\u{118ac}', '\u{0}', '\u{0}']),
- ('\u{118cd}', ['\u{118ad}', '\u{0}', '\u{0}']),
- ('\u{118ce}', ['\u{118ae}', '\u{0}', '\u{0}']),
- ('\u{118cf}', ['\u{118af}', '\u{0}', '\u{0}']),
- ('\u{118d0}', ['\u{118b0}', '\u{0}', '\u{0}']),
- ('\u{118d1}', ['\u{118b1}', '\u{0}', '\u{0}']),
- ('\u{118d2}', ['\u{118b2}', '\u{0}', '\u{0}']),
- ('\u{118d3}', ['\u{118b3}', '\u{0}', '\u{0}']),
- ('\u{118d4}', ['\u{118b4}', '\u{0}', '\u{0}']),
- ('\u{118d5}', ['\u{118b5}', '\u{0}', '\u{0}']),
- ('\u{118d6}', ['\u{118b6}', '\u{0}', '\u{0}']),
- ('\u{118d7}', ['\u{118b7}', '\u{0}', '\u{0}']),
- ('\u{118d8}', ['\u{118b8}', '\u{0}', '\u{0}']),
- ('\u{118d9}', ['\u{118b9}', '\u{0}', '\u{0}']),
- ('\u{118da}', ['\u{118ba}', '\u{0}', '\u{0}']),
- ('\u{118db}', ['\u{118bb}', '\u{0}', '\u{0}']),
- ('\u{118dc}', ['\u{118bc}', '\u{0}', '\u{0}']),
- ('\u{118dd}', ['\u{118bd}', '\u{0}', '\u{0}']),
- ('\u{118de}', ['\u{118be}', '\u{0}', '\u{0}']),
- ('\u{118df}', ['\u{118bf}', '\u{0}', '\u{0}']),
- ('\u{16e60}', ['\u{16e40}', '\u{0}', '\u{0}']),
- ('\u{16e61}', ['\u{16e41}', '\u{0}', '\u{0}']),
- ('\u{16e62}', ['\u{16e42}', '\u{0}', '\u{0}']),
- ('\u{16e63}', ['\u{16e43}', '\u{0}', '\u{0}']),
- ('\u{16e64}', ['\u{16e44}', '\u{0}', '\u{0}']),
- ('\u{16e65}', ['\u{16e45}', '\u{0}', '\u{0}']),
- ('\u{16e66}', ['\u{16e46}', '\u{0}', '\u{0}']),
- ('\u{16e67}', ['\u{16e47}', '\u{0}', '\u{0}']),
- ('\u{16e68}', ['\u{16e48}', '\u{0}', '\u{0}']),
- ('\u{16e69}', ['\u{16e49}', '\u{0}', '\u{0}']),
- ('\u{16e6a}', ['\u{16e4a}', '\u{0}', '\u{0}']),
- ('\u{16e6b}', ['\u{16e4b}', '\u{0}', '\u{0}']),
- ('\u{16e6c}', ['\u{16e4c}', '\u{0}', '\u{0}']),
- ('\u{16e6d}', ['\u{16e4d}', '\u{0}', '\u{0}']),
- ('\u{16e6e}', ['\u{16e4e}', '\u{0}', '\u{0}']),
- ('\u{16e6f}', ['\u{16e4f}', '\u{0}', '\u{0}']),
- ('\u{16e70}', ['\u{16e50}', '\u{0}', '\u{0}']),
- ('\u{16e71}', ['\u{16e51}', '\u{0}', '\u{0}']),
- ('\u{16e72}', ['\u{16e52}', '\u{0}', '\u{0}']),
- ('\u{16e73}', ['\u{16e53}', '\u{0}', '\u{0}']),
- ('\u{16e74}', ['\u{16e54}', '\u{0}', '\u{0}']),
- ('\u{16e75}', ['\u{16e55}', '\u{0}', '\u{0}']),
- ('\u{16e76}', ['\u{16e56}', '\u{0}', '\u{0}']),
- ('\u{16e77}', ['\u{16e57}', '\u{0}', '\u{0}']),
- ('\u{16e78}', ['\u{16e58}', '\u{0}', '\u{0}']),
- ('\u{16e79}', ['\u{16e59}', '\u{0}', '\u{0}']),
- ('\u{16e7a}', ['\u{16e5a}', '\u{0}', '\u{0}']),
- ('\u{16e7b}', ['\u{16e5b}', '\u{0}', '\u{0}']),
- ('\u{16e7c}', ['\u{16e5c}', '\u{0}', '\u{0}']),
- ('\u{16e7d}', ['\u{16e5d}', '\u{0}', '\u{0}']),
- ('\u{16e7e}', ['\u{16e5e}', '\u{0}', '\u{0}']),
- ('\u{16e7f}', ['\u{16e5f}', '\u{0}', '\u{0}']),
- ('\u{1e922}', ['\u{1e900}', '\u{0}', '\u{0}']),
- ('\u{1e923}', ['\u{1e901}', '\u{0}', '\u{0}']),
- ('\u{1e924}', ['\u{1e902}', '\u{0}', '\u{0}']),
- ('\u{1e925}', ['\u{1e903}', '\u{0}', '\u{0}']),
- ('\u{1e926}', ['\u{1e904}', '\u{0}', '\u{0}']),
- ('\u{1e927}', ['\u{1e905}', '\u{0}', '\u{0}']),
- ('\u{1e928}', ['\u{1e906}', '\u{0}', '\u{0}']),
- ('\u{1e929}', ['\u{1e907}', '\u{0}', '\u{0}']),
- ('\u{1e92a}', ['\u{1e908}', '\u{0}', '\u{0}']),
- ('\u{1e92b}', ['\u{1e909}', '\u{0}', '\u{0}']),
- ('\u{1e92c}', ['\u{1e90a}', '\u{0}', '\u{0}']),
- ('\u{1e92d}', ['\u{1e90b}', '\u{0}', '\u{0}']),
- ('\u{1e92e}', ['\u{1e90c}', '\u{0}', '\u{0}']),
- ('\u{1e92f}', ['\u{1e90d}', '\u{0}', '\u{0}']),
- ('\u{1e930}', ['\u{1e90e}', '\u{0}', '\u{0}']),
- ('\u{1e931}', ['\u{1e90f}', '\u{0}', '\u{0}']),
- ('\u{1e932}', ['\u{1e910}', '\u{0}', '\u{0}']),
- ('\u{1e933}', ['\u{1e911}', '\u{0}', '\u{0}']),
- ('\u{1e934}', ['\u{1e912}', '\u{0}', '\u{0}']),
- ('\u{1e935}', ['\u{1e913}', '\u{0}', '\u{0}']),
- ('\u{1e936}', ['\u{1e914}', '\u{0}', '\u{0}']),
- ('\u{1e937}', ['\u{1e915}', '\u{0}', '\u{0}']),
- ('\u{1e938}', ['\u{1e916}', '\u{0}', '\u{0}']),
- ('\u{1e939}', ['\u{1e917}', '\u{0}', '\u{0}']),
- ('\u{1e93a}', ['\u{1e918}', '\u{0}', '\u{0}']),
- ('\u{1e93b}', ['\u{1e919}', '\u{0}', '\u{0}']),
- ('\u{1e93c}', ['\u{1e91a}', '\u{0}', '\u{0}']),
- ('\u{1e93d}', ['\u{1e91b}', '\u{0}', '\u{0}']),
- ('\u{1e93e}', ['\u{1e91c}', '\u{0}', '\u{0}']),
- ('\u{1e93f}', ['\u{1e91d}', '\u{0}', '\u{0}']),
- ('\u{1e940}', ['\u{1e91e}', '\u{0}', '\u{0}']),
- ('\u{1e941}', ['\u{1e91f}', '\u{0}', '\u{0}']),
- ('\u{1e942}', ['\u{1e920}', '\u{0}', '\u{0}']),
- ('\u{1e943}', ['\u{1e921}', '\u{0}', '\u{0}']),
+ static UPPERCASE_TABLE_MULTI: &[[char; 3]] = &[
+ ['S', 'S', '\u{0}'], ['\u{2bc}', 'N', '\u{0}'], ['J', '\u{30c}', '\u{0}'],
+ ['\u{399}', '\u{308}', '\u{301}'], ['\u{3a5}', '\u{308}', '\u{301}'],
+ ['\u{535}', '\u{552}', '\u{0}'], ['H', '\u{331}', '\u{0}'], ['T', '\u{308}', '\u{0}'],
+ ['W', '\u{30a}', '\u{0}'], ['Y', '\u{30a}', '\u{0}'], ['A', '\u{2be}', '\u{0}'],
+ ['\u{3a5}', '\u{313}', '\u{0}'], ['\u{3a5}', '\u{313}', '\u{300}'],
+ ['\u{3a5}', '\u{313}', '\u{301}'], ['\u{3a5}', '\u{313}', '\u{342}'],
+ ['\u{1f08}', '\u{399}', '\u{0}'], ['\u{1f09}', '\u{399}', '\u{0}'],
+ ['\u{1f0a}', '\u{399}', '\u{0}'], ['\u{1f0b}', '\u{399}', '\u{0}'],
+ ['\u{1f0c}', '\u{399}', '\u{0}'], ['\u{1f0d}', '\u{399}', '\u{0}'],
+ ['\u{1f0e}', '\u{399}', '\u{0}'], ['\u{1f0f}', '\u{399}', '\u{0}'],
+ ['\u{1f08}', '\u{399}', '\u{0}'], ['\u{1f09}', '\u{399}', '\u{0}'],
+ ['\u{1f0a}', '\u{399}', '\u{0}'], ['\u{1f0b}', '\u{399}', '\u{0}'],
+ ['\u{1f0c}', '\u{399}', '\u{0}'], ['\u{1f0d}', '\u{399}', '\u{0}'],
+ ['\u{1f0e}', '\u{399}', '\u{0}'], ['\u{1f0f}', '\u{399}', '\u{0}'],
+ ['\u{1f28}', '\u{399}', '\u{0}'], ['\u{1f29}', '\u{399}', '\u{0}'],
+ ['\u{1f2a}', '\u{399}', '\u{0}'], ['\u{1f2b}', '\u{399}', '\u{0}'],
+ ['\u{1f2c}', '\u{399}', '\u{0}'], ['\u{1f2d}', '\u{399}', '\u{0}'],
+ ['\u{1f2e}', '\u{399}', '\u{0}'], ['\u{1f2f}', '\u{399}', '\u{0}'],
+ ['\u{1f28}', '\u{399}', '\u{0}'], ['\u{1f29}', '\u{399}', '\u{0}'],
+ ['\u{1f2a}', '\u{399}', '\u{0}'], ['\u{1f2b}', '\u{399}', '\u{0}'],
+ ['\u{1f2c}', '\u{399}', '\u{0}'], ['\u{1f2d}', '\u{399}', '\u{0}'],
+ ['\u{1f2e}', '\u{399}', '\u{0}'], ['\u{1f2f}', '\u{399}', '\u{0}'],
+ ['\u{1f68}', '\u{399}', '\u{0}'], ['\u{1f69}', '\u{399}', '\u{0}'],
+ ['\u{1f6a}', '\u{399}', '\u{0}'], ['\u{1f6b}', '\u{399}', '\u{0}'],
+ ['\u{1f6c}', '\u{399}', '\u{0}'], ['\u{1f6d}', '\u{399}', '\u{0}'],
+ ['\u{1f6e}', '\u{399}', '\u{0}'], ['\u{1f6f}', '\u{399}', '\u{0}'],
+ ['\u{1f68}', '\u{399}', '\u{0}'], ['\u{1f69}', '\u{399}', '\u{0}'],
+ ['\u{1f6a}', '\u{399}', '\u{0}'], ['\u{1f6b}', '\u{399}', '\u{0}'],
+ ['\u{1f6c}', '\u{399}', '\u{0}'], ['\u{1f6d}', '\u{399}', '\u{0}'],
+ ['\u{1f6e}', '\u{399}', '\u{0}'], ['\u{1f6f}', '\u{399}', '\u{0}'],
+ ['\u{1fba}', '\u{399}', '\u{0}'], ['\u{391}', '\u{399}', '\u{0}'],
+ ['\u{386}', '\u{399}', '\u{0}'], ['\u{391}', '\u{342}', '\u{0}'],
+ ['\u{391}', '\u{342}', '\u{399}'], ['\u{391}', '\u{399}', '\u{0}'],
+ ['\u{1fca}', '\u{399}', '\u{0}'], ['\u{397}', '\u{399}', '\u{0}'],
+ ['\u{389}', '\u{399}', '\u{0}'], ['\u{397}', '\u{342}', '\u{0}'],
+ ['\u{397}', '\u{342}', '\u{399}'], ['\u{397}', '\u{399}', '\u{0}'],
+ ['\u{399}', '\u{308}', '\u{300}'], ['\u{399}', '\u{308}', '\u{301}'],
+ ['\u{399}', '\u{342}', '\u{0}'], ['\u{399}', '\u{308}', '\u{342}'],
+ ['\u{3a5}', '\u{308}', '\u{300}'], ['\u{3a5}', '\u{308}', '\u{301}'],
+ ['\u{3a1}', '\u{313}', '\u{0}'], ['\u{3a5}', '\u{342}', '\u{0}'],
+ ['\u{3a5}', '\u{308}', '\u{342}'], ['\u{1ffa}', '\u{399}', '\u{0}'],
+ ['\u{3a9}', '\u{399}', '\u{0}'], ['\u{38f}', '\u{399}', '\u{0}'],
+ ['\u{3a9}', '\u{342}', '\u{0}'], ['\u{3a9}', '\u{342}', '\u{399}'],
+ ['\u{3a9}', '\u{399}', '\u{0}'], ['F', 'F', '\u{0}'], ['F', 'I', '\u{0}'],
+ ['F', 'L', '\u{0}'], ['F', 'F', 'I'], ['F', 'F', 'L'], ['S', 'T', '\u{0}'],
+ ['S', 'T', '\u{0}'], ['\u{544}', '\u{546}', '\u{0}'], ['\u{544}', '\u{535}', '\u{0}'],
+ ['\u{544}', '\u{53b}', '\u{0}'], ['\u{54e}', '\u{546}', '\u{0}'],
+ ['\u{544}', '\u{53d}', '\u{0}'],
];
}
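Note: this hunk splits the case-mapping data so that only multi-character uppercase expansions live in the dedicated UPPERCASE_TABLE_MULTI; the removed `-` entries above were single mappings padded with two '\u{0}' slots each, presumably re-encoded more compactly earlier in the patch. The observable behavior of char::to_uppercase is unchanged. A minimal stable sketch of what these entries encode:

    fn main() {
        // 'ß' expands to "SS", the first entry in UPPERCASE_TABLE_MULTI.
        assert_eq!('ß'.to_uppercase().collect::<String>(), "SS");
        // U+FB03 LATIN SMALL LIGATURE FFI expands to three characters.
        assert_eq!('ﬃ'.to_uppercase().collect::<String>(), "FFI");
        // Ordinary letters still map one-to-one through the main table.
        assert_eq!('a'.to_uppercase().collect::<String>(), "A");
    }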
diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
index 5327e4f81..0869644c0 100644
--- a/library/core/tests/array.rs
+++ b/library/core/tests/array.rs
@@ -1,5 +1,6 @@
-use core::array;
+use core::{array, assert_eq};
use core::convert::TryFrom;
+use core::num::NonZeroUsize;
use core::sync::atomic::{AtomicUsize, Ordering};
#[test]
@@ -557,7 +558,7 @@ fn array_intoiter_advance_by() {
assert_eq!(counter.get(), 13);
let r = it.advance_by(123456);
- assert_eq!(r, Err(87));
+ assert_eq!(r, Err(NonZeroUsize::new(123456 - 87).unwrap()));
assert_eq!(it.len(), 0);
assert_eq!(counter.get(), 100);
@@ -567,7 +568,7 @@ fn array_intoiter_advance_by() {
assert_eq!(counter.get(), 100);
let r = it.advance_by(10);
- assert_eq!(r, Err(0));
+ assert_eq!(r, Err(NonZeroUsize::new(10).unwrap()));
assert_eq!(it.len(), 0);
assert_eq!(counter.get(), 100);
}
@@ -610,7 +611,7 @@ fn array_intoiter_advance_back_by() {
assert_eq!(counter.get(), 13);
let r = it.advance_back_by(123456);
- assert_eq!(r, Err(87));
+ assert_eq!(r, Err(NonZeroUsize::new(123456 - 87).unwrap()));
assert_eq!(it.len(), 0);
assert_eq!(counter.get(), 100);
@@ -620,7 +621,7 @@ fn array_intoiter_advance_back_by() {
assert_eq!(counter.get(), 100);
let r = it.advance_back_by(10);
- assert_eq!(r, Err(0));
+ assert_eq!(r, Err(NonZeroUsize::new(10).unwrap()));
assert_eq!(it.len(), 0);
assert_eq!(counter.get(), 100);
}
@@ -679,8 +680,8 @@ fn array_into_iter_fold() {
let a = [1, 2, 3, 4, 5, 6];
let mut it = a.into_iter();
- it.advance_by(1).unwrap();
- it.advance_back_by(2).unwrap();
+ assert_eq!(it.advance_by(1), Ok(()));
+ assert_eq!(it.advance_back_by(2), Ok(()));
let s = it.fold(10, |a, b| 10 * a + b);
assert_eq!(s, 10234);
}
@@ -695,8 +696,8 @@ fn array_into_iter_rfold() {
let a = [1, 2, 3, 4, 5, 6];
let mut it = a.into_iter();
- it.advance_by(1).unwrap();
- it.advance_back_by(2).unwrap();
+ assert_eq!(it.advance_by(1), Ok(()));
+ assert_eq!(it.advance_back_by(2), Ok(()));
let s = it.rfold(10, |a, b| 10 * a + b);
assert_eq!(s, 10432);
}
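Note: all of the test churn above tracks the new contract of the unstable Iterator::advance_by and advance_back_by (feature iter_advance_by). On failure they now return Err(NonZeroUsize) holding the number of steps that remained, where the old API returned Err(usize) holding the number of steps that succeeded, hence every expectation flipping from k to requested - k. A minimal nightly sketch of the new contract:

    #![feature(iter_advance_by)]
    use std::num::NonZeroUsize;

    fn main() {
        let mut it = [1, 2, 3].into_iter();
        // Enough elements: Ok(()) and the iterator has advanced by 2.
        assert_eq!(it.advance_by(2), Ok(()));
        assert_eq!(it.next(), Some(3));
        // Too few elements: the error carries the 5 steps left over,
        // not the 0 steps that succeeded as under the old API.
        assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(5).unwrap()));
    }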
diff --git a/library/core/tests/fmt/mod.rs b/library/core/tests/fmt/mod.rs
index 618076358..c1c80c46c 100644
--- a/library/core/tests/fmt/mod.rs
+++ b/library/core/tests/fmt/mod.rs
@@ -22,11 +22,11 @@ fn test_pointer_formats_data_pointer() {
#[test]
fn test_estimated_capacity() {
assert_eq!(format_args!("").estimated_capacity(), 0);
- assert_eq!(format_args!("{}", "").estimated_capacity(), 0);
+ assert_eq!(format_args!("{}", {""}).estimated_capacity(), 0);
assert_eq!(format_args!("Hello").estimated_capacity(), 5);
- assert_eq!(format_args!("Hello, {}!", "").estimated_capacity(), 16);
- assert_eq!(format_args!("{}, hello!", "World").estimated_capacity(), 0);
- assert_eq!(format_args!("{}. 16-bytes piece", "World").estimated_capacity(), 32);
+ assert_eq!(format_args!("Hello, {}!", {""}).estimated_capacity(), 16);
+ assert_eq!(format_args!("{}, hello!", {"World"}).estimated_capacity(), 0);
+ assert_eq!(format_args!("{}. 16-bytes piece", {"World"}).estimated_capacity(), 32);
}
#[test]
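Note: the arguments gain braces ({""}, {"World"}) so they stay genuine runtime arguments. With a bare string literal, format_args! can now fold the argument into the format string itself (presumably the flatten-format-args optimization landing around this time), which would change the piece lengths that estimated_capacity measures; a block expression is opaque to that rewrite. A stable sketch of the pattern being preserved:

    fn show(args: std::fmt::Arguments<'_>) -> String {
        args.to_string()
    }

    fn main() {
        // Wrapping the literal in a block keeps it an argument rather than
        // letting the compiler splice it into the format string.
        assert_eq!(show(format_args!("Hello, {}!", { "world" })), "Hello, world!");
    }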
diff --git a/library/core/tests/iter/adapters/chain.rs b/library/core/tests/iter/adapters/chain.rs
index f419f9cec..175a1b638 100644
--- a/library/core/tests/iter/adapters/chain.rs
+++ b/library/core/tests/iter/adapters/chain.rs
@@ -1,5 +1,6 @@
use super::*;
use core::iter::*;
+use core::num::NonZeroUsize;
#[test]
fn test_iterator_chain() {
@@ -31,28 +32,28 @@ fn test_iterator_chain_advance_by() {
for i in 0..xs.len() {
let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
- iter.advance_by(i).unwrap();
+ assert_eq!(iter.advance_by(i), Ok(()));
assert_eq!(iter.next(), Some(&xs[i]));
- assert_eq!(iter.advance_by(100), Err(len - i - 1));
- iter.advance_by(0).unwrap();
+ assert_eq!(iter.advance_by(100), Err(NonZeroUsize::new(100 - (len - i - 1)).unwrap()));
+ assert_eq!(iter.advance_by(0), Ok(()));
}
for i in 0..ys.len() {
let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
- iter.advance_by(xs.len() + i).unwrap();
+ assert_eq!(iter.advance_by(xs.len() + i), Ok(()));
assert_eq!(iter.next(), Some(&ys[i]));
- assert_eq!(iter.advance_by(100), Err(ys.len() - i - 1));
- iter.advance_by(0).unwrap();
+ assert_eq!(iter.advance_by(100), Err(NonZeroUsize::new(100 - (ys.len() - i - 1)).unwrap()));
+ assert_eq!(iter.advance_by(0), Ok(()));
}
let mut iter = xs.iter().chain(ys);
- iter.advance_by(len).unwrap();
+ assert_eq!(iter.advance_by(len), Ok(()));
assert_eq!(iter.next(), None);
- iter.advance_by(0).unwrap();
+ assert_eq!(iter.advance_by(0), Ok(()));
let mut iter = xs.iter().chain(ys);
- assert_eq!(iter.advance_by(len + 1), Err(len));
- iter.advance_by(0).unwrap();
+ assert_eq!(iter.advance_by(len + 1), Err(NonZeroUsize::new(1).unwrap()));
+ assert_eq!(iter.advance_by(0), Ok(()));
}
test_chain(&[], &[]);
@@ -68,28 +69,28 @@ fn test_iterator_chain_advance_back_by() {
for i in 0..ys.len() {
let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
- iter.advance_back_by(i).unwrap();
+ assert_eq!(iter.advance_back_by(i), Ok(()));
assert_eq!(iter.next_back(), Some(&ys[ys.len() - i - 1]));
- assert_eq!(iter.advance_back_by(100), Err(len - i - 1));
- iter.advance_back_by(0).unwrap();
+ assert_eq!(iter.advance_back_by(100), Err(NonZeroUsize::new(100 - (len - i - 1)).unwrap()));
+ assert_eq!(iter.advance_back_by(0), Ok(()));
}
for i in 0..xs.len() {
let mut iter = Unfuse::new(xs).chain(Unfuse::new(ys));
- iter.advance_back_by(ys.len() + i).unwrap();
+ assert_eq!(iter.advance_back_by(ys.len() + i), Ok(()));
assert_eq!(iter.next_back(), Some(&xs[xs.len() - i - 1]));
- assert_eq!(iter.advance_back_by(100), Err(xs.len() - i - 1));
- iter.advance_back_by(0).unwrap();
+ assert_eq!(iter.advance_back_by(100), Err(NonZeroUsize::new(100 - (xs.len() - i - 1)).unwrap()));
+ assert_eq!(iter.advance_back_by(0), Ok(()));
}
let mut iter = xs.iter().chain(ys);
- iter.advance_back_by(len).unwrap();
+ assert_eq!(iter.advance_back_by(len), Ok(()));
assert_eq!(iter.next_back(), None);
- iter.advance_back_by(0).unwrap();
+ assert_eq!(iter.advance_back_by(0), Ok(()));
let mut iter = xs.iter().chain(ys);
- assert_eq!(iter.advance_back_by(len + 1), Err(len));
- iter.advance_back_by(0).unwrap();
+ assert_eq!(iter.advance_back_by(len + 1), Err(NonZeroUsize::new(1).unwrap()));
+ assert_eq!(iter.advance_back_by(0), Ok(()));
}
test_chain(&[], &[]);
diff --git a/library/core/tests/iter/adapters/enumerate.rs b/library/core/tests/iter/adapters/enumerate.rs
index 0e6033878..ff57973a6 100644
--- a/library/core/tests/iter/adapters/enumerate.rs
+++ b/library/core/tests/iter/adapters/enumerate.rs
@@ -1,4 +1,5 @@
use core::iter::*;
+use core::num::NonZeroUsize;
#[test]
fn test_iterator_enumerate() {
@@ -56,6 +57,20 @@ fn test_iterator_enumerate_count() {
}
#[test]
+fn test_iterator_enumerate_advance_by() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().enumerate();
+ assert_eq!(it.advance_by(0), Ok(()));
+ assert_eq!(it.next(), Some((0, &0)));
+ assert_eq!(it.advance_by(1), Ok(()));
+ assert_eq!(it.next(), Some((2, &2)));
+ assert_eq!(it.advance_by(2), Ok(()));
+ assert_eq!(it.next(), Some((5, &5)));
+ assert_eq!(it.advance_by(1), Err(NonZeroUsize::new(1).unwrap()));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
fn test_iterator_enumerate_fold() {
let xs = [0, 1, 2, 3, 4, 5];
let mut it = xs.iter().enumerate();
diff --git a/library/core/tests/iter/adapters/flatten.rs b/library/core/tests/iter/adapters/flatten.rs
index 690fd0c21..91809c9e5 100644
--- a/library/core/tests/iter/adapters/flatten.rs
+++ b/library/core/tests/iter/adapters/flatten.rs
@@ -1,5 +1,7 @@
+use core::assert_eq;
use super::*;
use core::iter::*;
+use core::num::NonZeroUsize;
#[test]
fn test_iterator_flatten() {
@@ -61,19 +63,19 @@ fn test_flatten_try_folds() {
fn test_flatten_advance_by() {
let mut it = once(0..10).chain(once(10..30)).chain(once(30..40)).flatten();
- it.advance_by(5).unwrap();
+ assert_eq!(it.advance_by(5), Ok(()));
assert_eq!(it.next(), Some(5));
- it.advance_by(9).unwrap();
+ assert_eq!(it.advance_by(9), Ok(()));
assert_eq!(it.next(), Some(15));
- it.advance_back_by(4).unwrap();
+ assert_eq!(it.advance_back_by(4), Ok(()));
assert_eq!(it.next_back(), Some(35));
- it.advance_back_by(9).unwrap();
+ assert_eq!(it.advance_back_by(9), Ok(()));
assert_eq!(it.next_back(), Some(25));
- assert_eq!(it.advance_by(usize::MAX), Err(9));
- assert_eq!(it.advance_back_by(usize::MAX), Err(0));
- it.advance_by(0).unwrap();
- it.advance_back_by(0).unwrap();
+ assert_eq!(it.advance_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX - 9).unwrap()));
+ assert_eq!(it.advance_back_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX).unwrap()));
+ assert_eq!(it.advance_by(0), Ok(()));
+ assert_eq!(it.advance_back_by(0), Ok(()));
assert_eq!(it.size_hint(), (0, Some(0)));
}
@@ -174,19 +176,19 @@ fn test_flatten_count() {
let mut it = once(0..10).chain(once(10..30)).chain(once(30..40)).flatten();
assert_eq!(it.clone().count(), 40);
- it.advance_by(5).unwrap();
+ assert_eq!(it.advance_by(5), Ok(()));
assert_eq!(it.clone().count(), 35);
- it.advance_back_by(5).unwrap();
+ assert_eq!(it.advance_back_by(5), Ok(()));
assert_eq!(it.clone().count(), 30);
- it.advance_by(10).unwrap();
+ assert_eq!(it.advance_by(10), Ok(()));
assert_eq!(it.clone().count(), 20);
- it.advance_back_by(8).unwrap();
+ assert_eq!(it.advance_back_by(8), Ok(()));
assert_eq!(it.clone().count(), 12);
- it.advance_by(4).unwrap();
+ assert_eq!(it.advance_by(4), Ok(()));
assert_eq!(it.clone().count(), 8);
- it.advance_back_by(5).unwrap();
+ assert_eq!(it.advance_back_by(5), Ok(()));
assert_eq!(it.clone().count(), 3);
- it.advance_by(3).unwrap();
+ assert_eq!(it.advance_by(3), Ok(()));
assert_eq!(it.clone().count(), 0);
}
@@ -195,18 +197,18 @@ fn test_flatten_last() {
let mut it = once(0..10).chain(once(10..30)).chain(once(30..40)).flatten();
assert_eq!(it.clone().last(), Some(39));
- it.advance_by(5).unwrap(); // 5..40
+ assert_eq!(it.advance_by(5), Ok(())); // 5..40
assert_eq!(it.clone().last(), Some(39));
- it.advance_back_by(5).unwrap(); // 5..35
+ assert_eq!(it.advance_back_by(5), Ok(())); // 5..35
assert_eq!(it.clone().last(), Some(34));
- it.advance_by(10).unwrap(); // 15..35
+ assert_eq!(it.advance_by(10), Ok(())); // 15..35
assert_eq!(it.clone().last(), Some(34));
- it.advance_back_by(8).unwrap(); // 15..27
+ assert_eq!(it.advance_back_by(8), Ok(())); // 15..27
assert_eq!(it.clone().last(), Some(26));
- it.advance_by(4).unwrap(); // 19..27
+ assert_eq!(it.advance_by(4), Ok(())); // 19..27
assert_eq!(it.clone().last(), Some(26));
- it.advance_back_by(5).unwrap(); // 19..22
+ assert_eq!(it.advance_back_by(5), Ok(())); // 19..22
assert_eq!(it.clone().last(), Some(21));
- it.advance_by(3).unwrap(); // 22..22
+ assert_eq!(it.advance_by(3), Ok(())); // 22..22
assert_eq!(it.clone().last(), None);
}
diff --git a/library/core/tests/iter/adapters/skip.rs b/library/core/tests/iter/adapters/skip.rs
index 754641834..e3e88a84f 100644
--- a/library/core/tests/iter/adapters/skip.rs
+++ b/library/core/tests/iter/adapters/skip.rs
@@ -1,4 +1,5 @@
use core::iter::*;
+use core::num::NonZeroUsize;
use super::Unfuse;
@@ -74,11 +75,14 @@ fn test_iterator_skip_nth() {
#[test]
fn test_skip_advance_by() {
assert_eq!((0..0).skip(10).advance_by(0), Ok(()));
- assert_eq!((0..0).skip(10).advance_by(1), Err(0));
- assert_eq!((0u128..(usize::MAX as u128) + 1).skip(usize::MAX).advance_by(usize::MAX), Err(1));
- assert_eq!((0u128..u128::MAX).skip(usize::MAX).advance_by(1), Ok(()));
-
- assert_eq!((0..2).skip(1).advance_back_by(10), Err(1));
+ assert_eq!((0..0).skip(10).advance_by(1), Err(NonZeroUsize::new(1).unwrap()));
+ assert_eq!(
+ (0u128..(usize::MAX as u128) + 1).skip(usize::MAX - 10).advance_by(usize::MAX - 5),
+ Err(NonZeroUsize::new(usize::MAX - 16).unwrap())
+ );
+ assert_eq!((0u128..u128::MAX).skip(usize::MAX - 10).advance_by(20), Ok(()));
+
+ assert_eq!((0..2).skip(1).advance_back_by(10), Err(NonZeroUsize::new(9).unwrap()));
assert_eq!((0..0).skip(1).advance_back_by(0), Ok(()));
}
diff --git a/library/core/tests/iter/adapters/take.rs b/library/core/tests/iter/adapters/take.rs
index 3e26b43a2..3cad47c06 100644
--- a/library/core/tests/iter/adapters/take.rs
+++ b/library/core/tests/iter/adapters/take.rs
@@ -1,4 +1,5 @@
use core::iter::*;
+use core::num::NonZeroUsize;
#[test]
fn test_iterator_take() {
@@ -78,21 +79,21 @@ fn test_take_advance_by() {
let mut take = (0..10).take(3);
assert_eq!(take.advance_by(2), Ok(()));
assert_eq!(take.next(), Some(2));
- assert_eq!(take.advance_by(1), Err(0));
+ assert_eq!(take.advance_by(1), Err(NonZeroUsize::new(1).unwrap()));
assert_eq!((0..0).take(10).advance_by(0), Ok(()));
- assert_eq!((0..0).take(10).advance_by(1), Err(0));
- assert_eq!((0..10).take(4).advance_by(5), Err(4));
+ assert_eq!((0..0).take(10).advance_by(1), Err(NonZeroUsize::new(1).unwrap()));
+ assert_eq!((0..10).take(4).advance_by(5), Err(NonZeroUsize::new(1).unwrap()));
let mut take = (0..10).take(3);
assert_eq!(take.advance_back_by(2), Ok(()));
assert_eq!(take.next(), Some(0));
- assert_eq!(take.advance_back_by(1), Err(0));
+ assert_eq!(take.advance_back_by(1), Err(NonZeroUsize::new(1).unwrap()));
- assert_eq!((0..2).take(1).advance_back_by(10), Err(1));
- assert_eq!((0..0).take(1).advance_back_by(1), Err(0));
+ assert_eq!((0..2).take(1).advance_back_by(10), Err(NonZeroUsize::new(9).unwrap()));
+ assert_eq!((0..0).take(1).advance_back_by(1), Err(NonZeroUsize::new(1).unwrap()));
assert_eq!((0..0).take(1).advance_back_by(0), Ok(()));
- assert_eq!((0..usize::MAX).take(100).advance_back_by(usize::MAX), Err(100));
+ assert_eq!((0..usize::MAX).take(100).advance_back_by(usize::MAX), Err(NonZeroUsize::new(usize::MAX - 100).unwrap()));
}
#[test]
diff --git a/library/core/tests/iter/range.rs b/library/core/tests/iter/range.rs
index 0f91ffe2d..0a77ecddb 100644
--- a/library/core/tests/iter/range.rs
+++ b/library/core/tests/iter/range.rs
@@ -1,3 +1,4 @@
+use core::num::NonZeroUsize;
use super::*;
#[test]
@@ -287,25 +288,25 @@ fn test_range_step() {
#[test]
fn test_range_advance_by() {
let mut r = 0..usize::MAX;
- r.advance_by(0).unwrap();
- r.advance_back_by(0).unwrap();
+ assert_eq!(Ok(()), r.advance_by(0));
+ assert_eq!(Ok(()), r.advance_back_by(0));
assert_eq!(r.len(), usize::MAX);
- r.advance_by(1).unwrap();
- r.advance_back_by(1).unwrap();
+ assert_eq!(Ok(()), r.advance_by(1));
+ assert_eq!(Ok(()), r.advance_back_by(1));
assert_eq!((r.start, r.end), (1, usize::MAX - 1));
- assert_eq!(r.advance_by(usize::MAX), Err(usize::MAX - 2));
+ assert_eq!(Err(NonZeroUsize::new(2).unwrap()), r.advance_by(usize::MAX));
- r.advance_by(0).unwrap();
- r.advance_back_by(0).unwrap();
+ assert_eq!(Ok(()), r.advance_by(0));
+ assert_eq!(Ok(()), r.advance_back_by(0));
let mut r = 0u128..u128::MAX;
- r.advance_by(usize::MAX).unwrap();
- r.advance_back_by(usize::MAX).unwrap();
+ assert_eq!(Ok(()), r.advance_by(usize::MAX));
+ assert_eq!(Ok(()), r.advance_back_by(usize::MAX));
assert_eq!((r.start, r.end), (0u128 + usize::MAX as u128, u128::MAX - usize::MAX as u128));
}
diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs
index 62566a950..9eebfb1f1 100644
--- a/library/core/tests/iter/traits/iterator.rs
+++ b/library/core/tests/iter/traits/iterator.rs
@@ -1,3 +1,5 @@
+use core::num::NonZeroUsize;
+
/// A wrapper struct that implements `Eq` and `Ord` based on the wrapped
/// integer modulo 3. Used to test that `Iterator::max` and `Iterator::min`
/// return the correct element if some of them are equal.
@@ -150,11 +152,11 @@ fn test_iterator_advance_by() {
let mut iter = v.iter();
assert_eq!(iter.advance_by(i), Ok(()));
assert_eq!(iter.next().unwrap(), &v[i]);
- assert_eq!(iter.advance_by(100), Err(v.len() - 1 - i));
+ assert_eq!(iter.advance_by(100), Err(NonZeroUsize::new(100 - (v.len() - 1 - i)).unwrap()));
}
assert_eq!(v.iter().advance_by(v.len()), Ok(()));
- assert_eq!(v.iter().advance_by(100), Err(v.len()));
+ assert_eq!(v.iter().advance_by(100), Err(NonZeroUsize::new(100 - v.len()).unwrap()));
}
#[test]
@@ -165,11 +167,11 @@ fn test_iterator_advance_back_by() {
let mut iter = v.iter();
assert_eq!(iter.advance_back_by(i), Ok(()));
assert_eq!(iter.next_back().unwrap(), &v[v.len() - 1 - i]);
- assert_eq!(iter.advance_back_by(100), Err(v.len() - 1 - i));
+ assert_eq!(iter.advance_back_by(100), Err(NonZeroUsize::new(100 - (v.len() - 1 - i)).unwrap()));
}
assert_eq!(v.iter().advance_back_by(v.len()), Ok(()));
- assert_eq!(v.iter().advance_back_by(100), Err(v.len()));
+ assert_eq!(v.iter().advance_back_by(100), Err(NonZeroUsize::new(100 - v.len()).unwrap()));
}
#[test]
@@ -180,11 +182,11 @@ fn test_iterator_rev_advance_back_by() {
let mut iter = v.iter().rev();
assert_eq!(iter.advance_back_by(i), Ok(()));
assert_eq!(iter.next_back().unwrap(), &v[i]);
- assert_eq!(iter.advance_back_by(100), Err(v.len() - 1 - i));
+ assert_eq!(iter.advance_back_by(100), Err(NonZeroUsize::new(100 - (v.len() - 1 - i)).unwrap()));
}
assert_eq!(v.iter().rev().advance_back_by(v.len()), Ok(()));
- assert_eq!(v.iter().rev().advance_back_by(100), Err(v.len()));
+ assert_eq!(v.iter().rev().advance_back_by(100), Err(NonZeroUsize::new(100 - v.len()).unwrap()));
}
#[test]
@@ -424,11 +426,11 @@ fn test_iterator_rev_advance_by() {
let mut iter = v.iter().rev();
assert_eq!(iter.advance_by(i), Ok(()));
assert_eq!(iter.next().unwrap(), &v[v.len() - 1 - i]);
- assert_eq!(iter.advance_by(100), Err(v.len() - 1 - i));
+ assert_eq!(iter.advance_by(100), Err(NonZeroUsize::new(100 - (v.len() - 1 - i)).unwrap()));
}
assert_eq!(v.iter().rev().advance_by(v.len()), Ok(()));
- assert_eq!(v.iter().rev().advance_by(100), Err(v.len()));
+ assert_eq!(v.iter().rev().advance_by(100), Err(NonZeroUsize::new(100 - v.len()).unwrap()));
}
#[test]
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index ccb7be68e..6cdafa411 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -91,8 +91,7 @@
#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(ptr_metadata)]
-#![feature(once_cell)]
-#![feature(option_result_contains)]
+#![feature(lazy_cell)]
#![feature(unsized_tuple_coercion)]
#![feature(const_option)]
#![feature(const_option_ext)]
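Note: the once_cell gate is retired here because OnceCell/OnceLock stabilized separately, leaving the lazy types behind the new lazy_cell gate; option_result_contains presumably lost its last user in these tests. What lazy_cell covers, as a nightly sketch:

    #![feature(lazy_cell)]
    use std::cell::LazyCell;

    fn main() {
        let lazy = LazyCell::new(|| {
            println!("initializing"); // runs once, on first deref
            42
        });
        assert_eq!(*lazy, 42); // initializes here
        assert_eq!(*lazy, 42); // cached afterwards
    }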
diff --git a/library/core/tests/num/dec2flt/mod.rs b/library/core/tests/num/dec2flt/mod.rs
index a2b9bb551..874e0ec70 100644
--- a/library/core/tests/num/dec2flt/mod.rs
+++ b/library/core/tests/num/dec2flt/mod.rs
@@ -127,14 +127,3 @@ fn massive_exponent() {
assert_eq!(format!("1e-{max}000").parse(), Ok(0.0));
assert_eq!(format!("1e{max}000").parse(), Ok(f64::INFINITY));
}
-
-#[test]
-fn borderline_overflow() {
- let mut s = "0.".to_string();
- for _ in 0..375 {
- s.push('3');
- }
- // At the time of this writing, this returns Err(..), but this is a bug that should be fixed.
- // It makes no sense to enshrine that in a test, the important part is that it doesn't panic.
- let _ = s.parse::<f64>();
-}
diff --git a/library/core/tests/num/dec2flt/parse.rs b/library/core/tests/num/dec2flt/parse.rs
index edc77377d..4a5d24ba7 100644
--- a/library/core/tests/num/dec2flt/parse.rs
+++ b/library/core/tests/num/dec2flt/parse.rs
@@ -32,7 +32,7 @@ fn invalid_chars() {
}
fn parse_positive(s: &[u8]) -> Option<Number> {
- parse_number(s, false)
+ parse_number(s)
}
#[test]
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 39559cdbb..88f54591b 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -1,6 +1,7 @@
use core::cell::Cell;
use core::cmp::Ordering;
use core::mem::MaybeUninit;
+use core::num::NonZeroUsize;
use core::result::Result::{Err, Ok};
use core::slice;
@@ -142,20 +143,20 @@ fn test_iterator_advance_by() {
for i in 0..=v.len() {
let mut iter = v.iter();
- iter.advance_by(i).unwrap();
+ assert_eq!(iter.advance_by(i), Ok(()));
assert_eq!(iter.as_slice(), &v[i..]);
}
let mut iter = v.iter();
- assert_eq!(iter.advance_by(v.len() + 1), Err(v.len()));
+ assert_eq!(iter.advance_by(v.len() + 1), Err(NonZeroUsize::new(1).unwrap()));
assert_eq!(iter.as_slice(), &[]);
let mut iter = v.iter();
- iter.advance_by(3).unwrap();
+ assert_eq!(iter.advance_by(3), Ok(()));
assert_eq!(iter.as_slice(), &v[3..]);
- iter.advance_by(2).unwrap();
+ assert_eq!(iter.advance_by(2), Ok(()));
assert_eq!(iter.as_slice(), &[]);
- iter.advance_by(0).unwrap();
+ assert_eq!(iter.advance_by(0), Ok(()));
}
#[test]
@@ -164,20 +165,20 @@ fn test_iterator_advance_back_by() {
for i in 0..=v.len() {
let mut iter = v.iter();
- iter.advance_back_by(i).unwrap();
+ assert_eq!(iter.advance_back_by(i), Ok(()));
assert_eq!(iter.as_slice(), &v[..v.len() - i]);
}
let mut iter = v.iter();
- assert_eq!(iter.advance_back_by(v.len() + 1), Err(v.len()));
+ assert_eq!(iter.advance_back_by(v.len() + 1), Err(NonZeroUsize::new(1).unwrap()));
assert_eq!(iter.as_slice(), &[]);
let mut iter = v.iter();
- iter.advance_back_by(3).unwrap();
+ assert_eq!(iter.advance_back_by(3), Ok(()));
assert_eq!(iter.as_slice(), &v[..v.len() - 3]);
- iter.advance_back_by(2).unwrap();
+ assert_eq!(iter.advance_back_by(2), Ok(()));
assert_eq!(iter.as_slice(), &[]);
- iter.advance_back_by(0).unwrap();
+ assert_eq!(iter.advance_back_by(0), Ok(()));
}
#[test]
diff --git a/library/panic_abort/src/android.rs b/library/panic_abort/src/android.rs
index 0fd824f8a..20b5b6b51 100644
--- a/library/panic_abort/src/android.rs
+++ b/library/panic_abort/src/android.rs
@@ -15,7 +15,7 @@ type SetAbortMessageType = unsafe extern "C" fn(*const libc::c_char) -> ();
//
// Weakly resolve the symbol for android_set_abort_message. This function is only available
// for API >= 21.
-pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
+pub(crate) unsafe fn android_set_abort_message(payload: &mut dyn BoxMeUp) {
let func_addr =
libc::dlsym(libc::RTLD_DEFAULT, ANDROID_SET_ABORT_MESSAGE.as_ptr() as *const libc::c_char)
as usize;
@@ -23,7 +23,7 @@ pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
return;
}
- let payload = (*payload).get();
+ let payload = payload.get();
let msg = match payload.downcast_ref::<&'static str>() {
Some(msg) => msg.as_bytes(),
None => match payload.downcast_ref::<String>() {
diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs
index a3cebf99c..b193d79b0 100644
--- a/library/panic_abort/src/lib.rs
+++ b/library/panic_abort/src/lib.rs
@@ -29,7 +29,7 @@ pub unsafe extern "C" fn __rust_panic_cleanup(_: *mut u8) -> *mut (dyn Any + Sen
// "Leak" the payload and shim to the relevant abort on the platform in question.
#[rustc_std_internal_symbol]
-pub unsafe fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
+pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 {
// Android has the ability to attach a message as part of the abort.
#[cfg(target_os = "android")]
android::android_set_abort_message(_payload);
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index ea3c9a7a6..ce78ab82e 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -99,8 +99,8 @@ pub unsafe extern "C" fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any
// Entry point for raising an exception, just delegates to the platform-specific
// implementation.
#[rustc_std_internal_symbol]
-pub unsafe fn __rust_start_panic(payload: *mut &mut dyn BoxMeUp) -> u32 {
- let payload = Box::from_raw((*payload).take_box());
+pub unsafe fn __rust_start_panic(payload: &mut dyn BoxMeUp) -> u32 {
+ let payload = Box::from_raw(payload.take_box());
imp::panic(payload)
}
diff --git a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
index adf0fcbea..b5ba198e5 100644
--- a/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
+++ b/library/portable-simd/crates/core_simd/src/masks/full_masks.rs
@@ -257,7 +257,7 @@ where
}
}
-impl<T, const LANES: usize> core::convert::From<Mask<T, LANES>> for Simd<T, LANES>
+impl<T, const LANES: usize> From<Mask<T, LANES>> for Simd<T, LANES>
where
T: MaskElement,
LaneCount<LANES>: SupportedLaneCount,
diff --git a/library/proc_macro/src/bridge/fxhash.rs b/library/proc_macro/src/bridge/fxhash.rs
index 17bd0a1b3..f4e905441 100644
--- a/library/proc_macro/src/bridge/fxhash.rs
+++ b/library/proc_macro/src/bridge/fxhash.rs
@@ -5,8 +5,6 @@
//! on the `rustc_hash` crate.
use std::collections::HashMap;
-use std::convert::TryInto;
-use std::default::Default;
use std::hash::BuildHasherDefault;
use std::hash::Hasher;
use std::mem::size_of;
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 938935771..9d081c8b8 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -47,7 +47,7 @@ use std::cmp::Ordering;
use std::ops::RangeBounds;
use std::path::PathBuf;
use std::str::FromStr;
-use std::{error, fmt, iter};
+use std::{error, fmt};
/// Determines whether proc_macro has been made accessible to the currently
/// running program.
@@ -310,7 +310,7 @@ impl ConcatStreamsHelper {
/// Collects a number of token trees into a single stream.
#[stable(feature = "proc_macro_lib2", since = "1.29.0")]
-impl iter::FromIterator<TokenTree> for TokenStream {
+impl FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(trees: I) -> Self {
let iter = trees.into_iter();
let mut builder = ConcatTreesHelper::new(iter.size_hint().0);
@@ -322,7 +322,7 @@ impl iter::FromIterator<TokenTree> for TokenStream {
/// A "flattening" operation on token streams, collects token trees
/// from multiple token streams into a single stream.
#[stable(feature = "proc_macro_lib", since = "1.15.0")]
-impl iter::FromIterator<TokenStream> for TokenStream {
+impl FromIterator<TokenStream> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
let iter = streams.into_iter();
let mut builder = ConcatStreamsHelper::new(iter.size_hint().0);
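Note: dropping the iter:: qualification works in every edition, since FromIterator has been exported by the standard prelude since 1.0, so the trait name resolves without a path both in impl headers and in bounds. A small stable illustration, using an assumed helper collect_pairs:

    use std::collections::VecDeque;

    // FromIterator is in the prelude; no `iter::` path is needed.
    fn collect_pairs<C: FromIterator<(u32, u32)>>(n: u32) -> C {
        (0..n).map(|i| (i, i * i)).collect()
    }

    fn main() {
        let v: Vec<(u32, u32)> = collect_pairs(3);
        let d: VecDeque<(u32, u32)> = collect_pairs(3);
        assert_eq!(v[2], (2, 4));
        assert_eq!(d[2], (2, 4));
    }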
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index 598a4bf92..96c75f97f 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -15,8 +15,8 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core" }
-libc = { version = "0.2.139", default-features = false, features = ['rustc-dep-of-std'] }
-compiler_builtins = { version = "0.1.87" }
+libc = { version = "0.2.140", default-features = false, features = ['rustc-dep-of-std'] }
+compiler_builtins = { version = "0.1.91" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
hashbrown = { version = "0.12", default-features = false, features = ['rustc-dep-of-std'] }
diff --git a/library/std/build.rs b/library/std/build.rs
index ea8796675..cf708db6f 100644
--- a/library/std/build.rs
+++ b/library/std/build.rs
@@ -6,6 +6,9 @@ fn main() {
if target.contains("freebsd") {
if env::var("RUST_STD_FREEBSD_12_ABI").is_ok() {
println!("cargo:rustc-cfg=freebsd12");
+ } else if env::var("RUST_STD_FREEBSD_13_ABI").is_ok() {
+ println!("cargo:rustc-cfg=freebsd12");
+ println!("cargo:rustc-cfg=freebsd13");
}
} else if target.contains("linux")
|| target.contains("netbsd")
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 742c4cc7c..3afc8287e 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -1446,7 +1446,6 @@ impl<'a, K, V> IterMut<'a, K, V> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: IntoIterator::into_iter
-/// [`IntoIterator`]: crate::iter::IntoIterator
///
/// # Example
///
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index b59f89d32..ac906e682 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -12,13 +12,6 @@ use crate::ops::{BitAnd, BitOr, BitXor, Sub};
use super::map::{map_try_reserve_error, RandomState};
-// Future Optimization (FIXME!)
-// ============================
-//
-// Iteration over zero sized values is a noop. There is no need
-// for `bucket.val` in the case of HashSet. I suppose we would need HKT
-// to get rid of it properly.
-
/// A [hash set] implemented as a `HashMap` where the value is `()`.
///
/// As with the [`HashMap`] type, a `HashSet` requires that the elements
@@ -1279,7 +1272,6 @@ pub struct Iter<'a, K: 'a> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: IntoIterator::into_iter
-/// [`IntoIterator`]: crate::iter::IntoIterator
///
/// # Examples
///
diff --git a/library/std/src/collections/mod.rs b/library/std/src/collections/mod.rs
index ae2baba09..42f738acb 100644
--- a/library/std/src/collections/mod.rs
+++ b/library/std/src/collections/mod.rs
@@ -172,7 +172,8 @@
//!
//! ## Iterators
//!
-//! Iterators are a powerful and robust mechanism used throughout Rust's
+//! [Iterators][crate::iter]
+//! are a powerful and robust mechanism used throughout Rust's
//! standard libraries. Iterators provide a sequence of values in a generic,
//! safe, efficient and convenient way. The contents of an iterator are usually
//! *lazily* evaluated, so that only the values that are actually needed are
@@ -252,7 +253,9 @@
//!
//! Several other collection methods also return iterators to yield a sequence
//! of results but avoid allocating an entire collection to store the result in.
-//! This provides maximum flexibility as `collect` or `extend` can be called to
+//! This provides maximum flexibility as
+//! [`collect`][crate::iter::Iterator::collect] or
+//! [`extend`][crate::iter::Extend::extend] can be called to
//! "pipe" the sequence into any collection if desired. Otherwise, the sequence
//! can be looped over with a `for` loop. The iterator can also be discarded
//! after partial use, preventing the computation of the unused items.
@@ -395,8 +398,6 @@
//! // ...but the key hasn't changed. b is still "baz", not "xyz".
//! assert_eq!(map.keys().next().unwrap().b, "baz");
//! ```
-//!
-//! [IntoIterator]: crate::iter::IntoIterator "iter::IntoIterator"
#![stable(feature = "rust1", since = "1.0.0")]
@@ -416,8 +417,10 @@ pub use alloc_crate::collections::{BTreeMap, BTreeSet, BinaryHeap};
pub use alloc_crate::collections::{LinkedList, VecDeque};
#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
pub use self::hash_map::HashMap;
#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
pub use self::hash_set::HashSet;
#[stable(feature = "try_reserve", since = "1.57.0")]
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index 183f9ab3b..d372fa640 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -236,21 +236,14 @@ fn _var(key: &OsStr) -> Result<String, VarError> {
}
/// Fetches the environment variable `key` from the current process, returning
-/// [`None`] if the variable isn't set or there's another error.
+/// [`None`] if the variable isn't set or if there is another error.
///
-/// Note that the method will not check if the environment variable
-/// is valid Unicode. If you want to have an error on invalid UTF-8,
-/// use the [`var`] function instead.
-///
-/// # Errors
-///
-/// This function returns an error if the environment variable isn't set.
-///
-/// This function may return an error if the environment variable's name contains
+/// It may return `None` if the environment variable's name contains
/// the equal sign character (`=`) or the NUL character.
///
-/// This function may return an error if the environment variable's value contains
-/// the NUL character.
+/// Note that this function will not check if the environment variable
+/// is valid Unicode. If you want to have an error on invalid UTF-8,
+/// use the [`var`] function instead.
///
/// # Examples
///
@@ -895,6 +888,7 @@ pub mod consts {
/// - x86_64
/// - arm
/// - aarch64
+ /// - loongarch64
/// - m68k
/// - mips
/// - mips64
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
index 6b1f0cba8..408244b2c 100644
--- a/library/std/src/f32.rs
+++ b/library/std/src/f32.rs
@@ -78,10 +78,14 @@ impl f32 {
/// let f = 3.3_f32;
/// let g = -3.3_f32;
/// let h = -3.7_f32;
+ /// let i = 3.5_f32;
+ /// let j = 4.5_f32;
///
/// assert_eq!(f.round(), 3.0);
/// assert_eq!(g.round(), -3.0);
/// assert_eq!(h.round(), -4.0);
+ /// assert_eq!(i.round(), 4.0);
+ /// assert_eq!(j.round(), 5.0);
/// ```
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
@@ -91,6 +95,32 @@ impl f32 {
unsafe { intrinsics::roundf32(self) }
}
+ /// Returns the nearest integer to a number. Rounds half-way cases to the number
+ /// with an even least significant digit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(round_ties_even)]
+ ///
+ /// let f = 3.3_f32;
+ /// let g = -3.3_f32;
+ /// let h = 3.5_f32;
+ /// let i = 4.5_f32;
+ ///
+ /// assert_eq!(f.round_ties_even(), 3.0);
+ /// assert_eq!(g.round_ties_even(), -3.0);
+ /// assert_eq!(h.round_ties_even(), 4.0);
+ /// assert_eq!(i.round_ties_even(), 4.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[unstable(feature = "round_ties_even", issue = "96710")]
+ #[inline]
+ pub fn round_ties_even(self) -> f32 {
+ unsafe { intrinsics::rintf32(self) }
+ }
+
/// Returns the integer part of `self`.
/// This means that non-integer numbers are always truncated towards zero.
///
@@ -551,8 +581,10 @@ impl f32 {
unsafe { cmath::cbrtf(self) }
}
- /// Calculates the length of the hypotenuse of a right-angle triangle given
- /// legs of length `x` and `y`.
+ /// Compute the distance between the origin and a point (`x`, `y`) on the
+ /// Euclidean plane. Equivalently, compute the length of the hypotenuse of a
+ /// right-angle triangle with other sides having length `x.abs()` and
+ /// `y.abs()`.
///
/// # Examples
///
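Note: round_ties_even (tracking issue #96710) differs from round only on exact half-way values: ties go to the nearest even integer, the IEEE 754 roundTiesToEven rule, and it lowers to the rint intrinsic rather than round, as the hunk above shows. A nightly sketch contrasting the two:

    #![feature(round_ties_even)]

    fn main() {
        // round(): ties away from zero; round_ties_even(): ties to even.
        assert_eq!(2.5f32.round(), 3.0);
        assert_eq!(2.5f32.round_ties_even(), 2.0);
        assert_eq!(3.5f32.round_ties_even(), 4.0);
        assert_eq!((-2.5f32).round_ties_even(), -2.0);
        // Non-ties agree.
        assert_eq!(2.4f32.round_ties_even(), 2.4f32.round());
    }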
diff --git a/library/std/src/f32/tests.rs b/library/std/src/f32/tests.rs
index 6ee295de6..e949def00 100644
--- a/library/std/src/f32/tests.rs
+++ b/library/std/src/f32/tests.rs
@@ -209,6 +209,7 @@ fn test_ceil() {
#[test]
fn test_round() {
+ assert_approx_eq!(2.5f32.round(), 3.0f32);
assert_approx_eq!(1.0f32.round(), 1.0f32);
assert_approx_eq!(1.3f32.round(), 1.0f32);
assert_approx_eq!(1.5f32.round(), 2.0f32);
@@ -222,6 +223,21 @@ fn test_round() {
}
#[test]
+fn test_round_ties_even() {
+ assert_approx_eq!(2.5f32.round_ties_even(), 2.0f32);
+ assert_approx_eq!(1.0f32.round_ties_even(), 1.0f32);
+ assert_approx_eq!(1.3f32.round_ties_even(), 1.0f32);
+ assert_approx_eq!(1.5f32.round_ties_even(), 2.0f32);
+ assert_approx_eq!(1.7f32.round_ties_even(), 2.0f32);
+ assert_approx_eq!(0.0f32.round_ties_even(), 0.0f32);
+ assert_approx_eq!((-0.0f32).round_ties_even(), -0.0f32);
+ assert_approx_eq!((-1.0f32).round_ties_even(), -1.0f32);
+ assert_approx_eq!((-1.3f32).round_ties_even(), -1.0f32);
+ assert_approx_eq!((-1.5f32).round_ties_even(), -2.0f32);
+ assert_approx_eq!((-1.7f32).round_ties_even(), -2.0f32);
+}
+
+#[test]
fn test_trunc() {
assert_approx_eq!(1.0f32.trunc(), 1.0f32);
assert_approx_eq!(1.3f32.trunc(), 1.0f32);
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
index 16359766b..6782b861f 100644
--- a/library/std/src/f64.rs
+++ b/library/std/src/f64.rs
@@ -78,10 +78,14 @@ impl f64 {
/// let f = 3.3_f64;
/// let g = -3.3_f64;
/// let h = -3.7_f64;
+ /// let i = 3.5_f64;
+ /// let j = 4.5_f64;
///
/// assert_eq!(f.round(), 3.0);
/// assert_eq!(g.round(), -3.0);
/// assert_eq!(h.round(), -4.0);
+ /// assert_eq!(i.round(), 4.0);
+ /// assert_eq!(j.round(), 5.0);
/// ```
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
@@ -91,6 +95,32 @@ impl f64 {
unsafe { intrinsics::roundf64(self) }
}
+ /// Returns the nearest integer to a number. Rounds half-way cases to the number
+ /// with an even least significant digit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(round_ties_even)]
+ ///
+ /// let f = 3.3_f64;
+ /// let g = -3.3_f64;
+ /// let h = 3.5_f64;
+ /// let i = 4.5_f64;
+ ///
+ /// assert_eq!(f.round_ties_even(), 3.0);
+ /// assert_eq!(g.round_ties_even(), -3.0);
+ /// assert_eq!(h.round_ties_even(), 4.0);
+ /// assert_eq!(i.round_ties_even(), 4.0);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "method returns a new number and does not mutate the original value"]
+ #[unstable(feature = "round_ties_even", issue = "96710")]
+ #[inline]
+ pub fn round_ties_even(self) -> f64 {
+ unsafe { intrinsics::rintf64(self) }
+ }
+
/// Returns the integer part of `self`.
/// This means that non-integer numbers are always truncated towards zero.
///
@@ -553,8 +583,10 @@ impl f64 {
unsafe { cmath::cbrt(self) }
}
- /// Calculates the length of the hypotenuse of a right-angle triangle given
- /// legs of length `x` and `y`.
+ /// Compute the distance between the origin and a point (`x`, `y`) on the
+ /// Euclidean plane. Equivalently, compute the length of the hypotenuse of a
+ /// right-angle triangle with other sides having length `x.abs()` and
+ /// `y.abs()`.
///
/// # Examples
///
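Note: the reworded hypot docs (same change as for f32 above) make the geometry explicit and note that the legs are x.abs() and y.abs(), since the result is sign-insensitive. It is also the overflow-safe spelling of sqrt(x*x + y*y):

    fn main() {
        // Classic 3-4-5 triangle; allow for libm rounding.
        assert!((3.0f64.hypot(4.0) - 5.0).abs() < 1e-12);
        // Signs do not matter.
        assert!(((-3.0f64).hypot(4.0) - 5.0).abs() < 1e-12);
        // hypot avoids the intermediate overflow the naive formula hits.
        let big = 1e200_f64;
        assert!((big * big + big * big).is_infinite()); // naive form overflows
        assert!(big.hypot(big).is_finite());
    }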
diff --git a/library/std/src/f64/tests.rs b/library/std/src/f64/tests.rs
index 5b039d445..53d351cce 100644
--- a/library/std/src/f64/tests.rs
+++ b/library/std/src/f64/tests.rs
@@ -199,6 +199,7 @@ fn test_ceil() {
#[test]
fn test_round() {
+ assert_approx_eq!(2.5f64.round(), 3.0f64);
assert_approx_eq!(1.0f64.round(), 1.0f64);
assert_approx_eq!(1.3f64.round(), 1.0f64);
assert_approx_eq!(1.5f64.round(), 2.0f64);
@@ -212,6 +213,21 @@ fn test_round() {
}
#[test]
+fn test_round_ties_even() {
+ assert_approx_eq!(2.5f64.round_ties_even(), 2.0f64);
+ assert_approx_eq!(1.0f64.round_ties_even(), 1.0f64);
+ assert_approx_eq!(1.3f64.round_ties_even(), 1.0f64);
+ assert_approx_eq!(1.5f64.round_ties_even(), 2.0f64);
+ assert_approx_eq!(1.7f64.round_ties_even(), 2.0f64);
+ assert_approx_eq!(0.0f64.round_ties_even(), 0.0f64);
+ assert_approx_eq!((-0.0f64).round_ties_even(), -0.0f64);
+ assert_approx_eq!((-1.0f64).round_ties_even(), -1.0f64);
+ assert_approx_eq!((-1.3f64).round_ties_even(), -1.0f64);
+ assert_approx_eq!((-1.5f64).round_ties_even(), -2.0f64);
+ assert_approx_eq!((-1.7f64).round_ties_even(), -2.0f64);
+}
+
+#[test]
fn test_trunc() {
assert_approx_eq!(1.0f64.trunc(), 1.0f64);
assert_approx_eq!(1.3f64.trunc(), 1.0f64);
diff --git a/library/std/src/ffi/os_str.rs b/library/std/src/ffi/os_str.rs
index 80ed34157..5c0541d3c 100644
--- a/library/std/src/ffi/os_str.rs
+++ b/library/std/src/ffi/os_str.rs
@@ -6,7 +6,6 @@ use crate::cmp;
use crate::collections::TryReserveError;
use crate::fmt;
use crate::hash::{Hash, Hasher};
-use crate::iter::Extend;
use crate::ops;
use crate::rc::Rc;
use crate::str::FromStr;
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index 909d9bf40..401def184 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -2,7 +2,8 @@ use crate::io::prelude::*;
use crate::env;
use crate::fs::{self, File, OpenOptions};
-use crate::io::{ErrorKind, SeekFrom};
+use crate::io::{BorrowedBuf, ErrorKind, SeekFrom};
+use crate::mem::MaybeUninit;
use crate::path::Path;
use crate::str;
use crate::sync::Arc;
@@ -402,6 +403,23 @@ fn file_test_io_seek_read_write() {
}
#[test]
+fn file_test_read_buf() {
+ let tmpdir = tmpdir();
+ let filename = &tmpdir.join("test");
+ check!(fs::write(filename, &[1, 2, 3, 4]));
+
+ let mut buf: [MaybeUninit<u8>; 128] = MaybeUninit::uninit_array();
+ let mut buf = BorrowedBuf::from(buf.as_mut_slice());
+ let mut file = check!(File::open(filename));
+ check!(file.read_buf(buf.unfilled()));
+ assert_eq!(buf.filled(), &[1, 2, 3, 4]);
+ // File::read_buf should omit buffer initialization.
+ assert_eq!(buf.init_len(), 4);
+
+ check!(fs::remove_file(filename));
+}
+
+#[test]
fn file_test_stat_is_correct_on_is_file() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_stat_correct_on_is_file.txt");
diff --git a/library/std/src/io/buffered/bufwriter.rs b/library/std/src/io/buffered/bufwriter.rs
index 6acb937e7..14c455d4f 100644
--- a/library/std/src/io/buffered/bufwriter.rs
+++ b/library/std/src/io/buffered/bufwriter.rs
@@ -339,7 +339,7 @@ impl<W: Write> BufWriter<W> {
let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };
// SAFETY: forget(self) prevents double dropping inner
- let inner = unsafe { ptr::read(&mut self.inner) };
+ let inner = unsafe { ptr::read(&self.inner) };
mem::forget(self);
(inner, buf)
diff --git a/library/std/src/io/cursor.rs b/library/std/src/io/cursor.rs
index d98ab021c..25c64240e 100644
--- a/library/std/src/io/cursor.rs
+++ b/library/std/src/io/cursor.rs
@@ -34,7 +34,7 @@ use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
-/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
+/// fn write_ten_bytes_at_end<W: Write + Seek>(mut writer: W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
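Note: taking the writer by value (mut writer: W) makes the doc example more flexible without costing callers who only have a borrow, because the blanket impls of Write and Seek for &mut W mean a mutable reference still satisfies the bounds. Both call shapes, reusing the example's write_ten_bytes_at_end:

    use std::io::{self, Cursor, Seek, SeekFrom, Write};

    fn write_ten_bytes_at_end<W: Write + Seek>(mut writer: W) -> io::Result<()> {
        writer.seek(SeekFrom::End(-10))?;
        for i in 0..10 {
            writer.write(&[i])?;
        }
        Ok(())
    }

    fn main() -> io::Result<()> {
        let mut buf = Cursor::new(vec![0; 15]);
        write_ten_bytes_at_end(&mut buf)?; // by reference, via Write for &mut W
        write_ten_bytes_at_end(buf)?;      // or by value
        Ok(())
    }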
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index 7f07e4fdd..34c0ce9dc 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -11,7 +11,6 @@ mod repr_unpacked;
#[cfg(not(target_pointer_width = "64"))]
use repr_unpacked::Repr;
-use crate::convert::From;
use crate::error;
use crate::fmt;
use crate::result;
@@ -370,7 +369,7 @@ pub enum ErrorKind {
// "Unusual" error kinds which do not correspond simply to (sets
// of) OS error codes, should be added just above this comment.
- // `Other` and `Uncategorised` should remain at the end:
+ // `Other` and `Uncategorized` should remain at the end:
//
/// A custom error that does not fall under any other I/O error kind.
///
@@ -882,6 +881,13 @@ impl Error {
/// Returns the corresponding [`ErrorKind`] for this error.
///
+ /// This may be a value set by Rust code constructing custom `io::Error`s,
+ /// or if this `io::Error` was sourced from the operating system,
+ /// it will be a value inferred from the system's error encoding.
+ /// See [`last_os_error`] for more details.
+ ///
+ /// [`last_os_error`]: Error::last_os_error
+ ///
/// # Examples
///
/// ```
@@ -892,7 +898,8 @@ impl Error {
/// }
///
/// fn main() {
- /// // Will print "Uncategorized".
+ /// // As no error has (visibly) occurred, this may print anything!
+ /// // It likely prints a placeholder for unidentified (non-)errors.
/// print_error(Error::last_os_error());
/// // Will print "AddrInUse".
/// print_error(Error::new(ErrorKind::AddrInUse, "oh no!"));
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index b2b6d8613..ea66d0409 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -268,7 +268,7 @@ pub(crate) use self::stdio::attempt_print_to_stderr;
#[unstable(feature = "internal_output_capture", issue = "none")]
#[doc(no_inline, hidden)]
pub use self::stdio::set_output_capture;
-#[unstable(feature = "is_terminal", issue = "98070")]
+#[stable(feature = "is_terminal", since = "1.70.0")]
pub use self::stdio::IsTerminal;
#[unstable(feature = "print_internals", issue = "none")]
pub use self::stdio::{_eprint, _print};
@@ -823,8 +823,22 @@ pub trait Read {
/// Read the exact number of bytes required to fill `cursor`.
///
- /// This is equivalent to the [`read_exact`](Read::read_exact) method, except that it is passed a [`BorrowedCursor`] rather than `[u8]` to
- /// allow use with uninitialized buffers.
+ /// This is similar to the [`read_exact`](Read::read_exact) method, except
+ /// that it is passed a [`BorrowedCursor`] rather than `[u8]` to allow use
+ /// with uninitialized buffers.
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters an error of the kind [`ErrorKind::Interrupted`]
+ /// then the error is ignored and the operation will continue.
+ ///
+ /// If this function encounters an "end of file" before completely filling
+ /// the buffer, it returns an error of the kind [`ErrorKind::UnexpectedEof`].
+ ///
+ /// If any other read error is encountered then this function immediately
+ /// returns.
+ ///
+ /// If this function returns an error, all bytes read will be appended to `cursor`.
#[unstable(feature = "read_buf", issue = "78485")]
fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> Result<()> {
while cursor.capacity() > 0 {
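Note: read_buf_exact and the new read_buf forwarders below work through a BorrowedCursor into a BorrowedBuf, which tracks the filled region and the initialized region separately so readers can skip zero-initializing the storage. A minimal nightly sketch of driving it by hand (features read_buf and maybe_uninit_uninit_array, as this toolchain names them):

    #![feature(read_buf, maybe_uninit_uninit_array)]
    use std::io::{BorrowedBuf, Read};
    use std::mem::MaybeUninit;

    fn main() -> std::io::Result<()> {
        let mut storage: [MaybeUninit<u8>; 8] = MaybeUninit::uninit_array();
        let mut buf = BorrowedBuf::from(storage.as_mut_slice());
        // Any reader works; a byte slice keeps the sketch self-contained.
        let mut src: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8];
        src.read_buf_exact(buf.unfilled())?;
        assert_eq!(buf.filled(), &[1, 2, 3, 4, 5, 6, 7, 8]);
        Ok(())
    }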
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 14bfef4c7..9098d36ee 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -8,7 +8,7 @@ use crate::io::prelude::*;
use crate::cell::{Cell, RefCell};
use crate::fmt;
use crate::fs::File;
-use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
+use crate::io::{self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantMutex, ReentrantMutexGuard};
use crate::sys::stdio;
@@ -97,6 +97,10 @@ impl Read for StdinRaw {
handle_ebadf(self.0.read(buf), 0)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ handle_ebadf(self.0.read_buf(buf), ())
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
handle_ebadf(self.0.read_vectored(bufs), 0)
}
@@ -418,6 +422,9 @@ impl Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.lock().read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.lock().read_buf(buf)
+ }
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.lock().read_vectored(bufs)
}
@@ -450,6 +457,10 @@ impl Read for StdinLock<'_> {
self.inner.read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.read_buf(buf)
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.read_vectored(bufs)
}
@@ -1036,13 +1047,23 @@ pub(crate) fn attempt_print_to_stderr(args: fmt::Arguments<'_>) {
}
/// Trait to determine if a descriptor/handle refers to a terminal/tty.
-#[unstable(feature = "is_terminal", issue = "98070")]
+#[stable(feature = "is_terminal", since = "1.70.0")]
pub trait IsTerminal: crate::sealed::Sealed {
/// Returns `true` if the descriptor/handle refers to a terminal/tty.
///
/// On platforms where Rust does not know how to detect a terminal yet, this will return
/// `false`. This will also return `false` if an unexpected error occurred, such as from
/// passing an invalid file descriptor.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// On Windows, in addition to detecting consoles, this currently uses some heuristics to
+ /// detect older msys/cygwin/mingw pseudo-terminals based on device name: devices with names
+ /// starting with `msys-` or `cygwin-` and ending in `-pty` will be considered terminals.
+ /// Note that this [may change in the future][changes].
+ ///
+ /// [changes]: io#platform-specific-behavior
+ #[stable(feature = "is_terminal", since = "1.70.0")]
fn is_terminal(&self) -> bool;
}
@@ -1051,7 +1072,7 @@ macro_rules! impl_is_terminal {
#[unstable(feature = "sealed", issue = "none")]
impl crate::sealed::Sealed for $t {}
- #[unstable(feature = "is_terminal", issue = "98070")]
+ #[stable(feature = "is_terminal", since = "1.70.0")]
impl IsTerminal for $t {
#[inline]
fn is_terminal(&self) -> bool {
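Note: with is_terminal stabilized for 1.70, the gate comes off the trait, the method, and the per-type impls, so this now works on stable. The canonical use is switching output styles depending on whether stdout is a tty:

    use std::io::{stdout, IsTerminal};

    fn main() {
        if stdout().is_terminal() {
            println!("interactive: colors and progress bars are fine");
        } else {
            println!("piped or redirected: emit plain, parseable output");
        }
    }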
diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs
index 203c490fa..43842bee9 100644
--- a/library/std/src/keyword_docs.rs
+++ b/library/std/src/keyword_docs.rs
@@ -1678,7 +1678,7 @@ mod super_keyword {}
/// below `Iterator` is a **supertrait** and `ThreeIterator` is a **subtrait**:
///
/// ```rust
-/// trait ThreeIterator: std::iter::Iterator {
+/// trait ThreeIterator: Iterator {
/// fn next_three(&mut self) -> Option<[Self::Item; 3]>;
/// }
/// ```
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index b62f3ad29..98fcc76aa 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -235,6 +235,7 @@
#![cfg_attr(windows, feature(round_char_boundary))]
//
// Language features:
+// tidy-alphabetical-start
#![feature(alloc_error_handler)]
#![feature(allocator_internals)]
#![feature(allow_internal_unsafe)]
@@ -254,11 +255,10 @@
#![feature(exhaustive_patterns)]
#![feature(if_let_guard)]
#![feature(intra_doc_pointers)]
-#![feature(is_terminal)]
#![feature(lang_items)]
#![feature(let_chains)]
-#![feature(linkage)]
#![feature(link_cfg)]
+#![feature(linkage)]
#![feature(min_specialization)]
#![feature(must_not_suspend)]
#![feature(needs_panic_runtime)]
@@ -272,9 +272,10 @@
#![feature(thread_local)]
#![feature(try_blocks)]
#![feature(utf8_chunks)]
+// tidy-alphabetical-end
//
// Library features (core):
-#![feature(atomic_mut_ptr)]
+// tidy-alphabetical-start
#![feature(char_internals)]
#![feature(core_intrinsics)]
#![feature(duration_constants)]
@@ -290,10 +291,9 @@
#![feature(hashmap_internals)]
#![feature(ip)]
#![feature(ip_in_core)]
-#![feature(is_some_and)]
#![feature(maybe_uninit_slice)]
+#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_write_slice)]
-#![feature(nonnull_slice_from_raw_parts)]
#![feature(panic_can_unwind)]
#![feature(panic_info_message)]
#![feature(panic_internals)]
@@ -304,30 +304,34 @@
#![feature(provide_any)]
#![feature(ptr_as_uninit)]
#![feature(raw_os_nonzero)]
+#![feature(round_ties_even)]
#![feature(slice_internals)]
#![feature(slice_ptr_get)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
-#![feature(maybe_uninit_uninit_array)]
-#![feature(const_maybe_uninit_uninit_array)]
-#![feature(const_waker)]
+// tidy-alphabetical-end
//
// Library features (alloc):
+// tidy-alphabetical-start
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(get_mut_unchecked)]
#![feature(map_try_insert)]
#![feature(new_uninit)]
+#![feature(slice_concat_trait)]
#![feature(thin_box)]
#![feature(try_reserve_kind)]
#![feature(vec_into_raw_parts)]
-#![feature(slice_concat_trait)]
+// tidy-alphabetical-end
//
// Library features (unwind):
+// tidy-alphabetical-start
#![feature(panic_unwind)]
+// tidy-alphabetical-end
//
// Only for re-exporting:
+// tidy-alphabetical-start
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(c_variadic)]
@@ -339,24 +343,29 @@
#![feature(custom_test_frameworks)]
#![feature(edition_panic)]
#![feature(format_args_nl)]
+#![feature(get_many_mut)]
+#![feature(lazy_cell)]
#![feature(log_syntax)]
-#![feature(once_cell)]
#![feature(saturating_int_impl)]
#![feature(stdsimd)]
#![feature(test)]
#![feature(trace_macros)]
-#![feature(get_many_mut)]
+// tidy-alphabetical-end
//
// Only used in tests/benchmarks:
//
// Only for const-ness:
+// tidy-alphabetical-start
#![feature(const_collections_with_hasher)]
#![feature(const_hash)]
#![feature(const_io_structs)]
#![feature(const_ip)]
#![feature(const_ipv4)]
#![feature(const_ipv6)]
+#![feature(const_maybe_uninit_uninit_array)]
+#![feature(const_waker)]
#![feature(thread_local_internals)]
+// tidy-alphabetical-end
//
#![default_lib_allocator]
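Note: the tidy-alphabetical-start/end comments added throughout this file are markers for the repository's tidy lint, which enforces that the lines between each pair stay alphabetically sorted; that is also why a few attributes above shuffle position (linkage after link_cfg, slice_concat_trait before thin_box) with no semantic change. The convention, in miniature:

    // tidy-alphabetical-start
    #![feature(alloc_layout_extra)]
    #![feature(allocator_api)]
    #![feature(get_mut_unchecked)]
    // tidy-alphabetical-end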
diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs
index ac09a8059..4b42ad65e 100644
--- a/library/std/src/net/tcp.rs
+++ b/library/std/src/net/tcp.rs
@@ -6,7 +6,7 @@ mod tests;
use crate::io::prelude::*;
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::iter::FusedIterator;
use crate::net::{Shutdown, SocketAddr, ToSocketAddrs};
use crate::sys_common::net as net_imp;
@@ -619,6 +619,10 @@ impl Read for TcpStream {
self.0.read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0.read_buf(buf)
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0.read_vectored(bufs)
}
@@ -653,6 +657,10 @@ impl Read for &TcpStream {
self.0.read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0.read_buf(buf)
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0.read_vectored(bufs)
}
@@ -861,7 +869,7 @@ impl TcpListener {
/// use std::net::{TcpListener, TcpStream};
///
/// fn listen_on(port: u16) -> impl Iterator<Item = TcpStream> {
- /// let listener = TcpListener::bind("127.0.0.1:80").unwrap();
+ /// let listener = TcpListener::bind(("127.0.0.1", port)).unwrap();
/// listener.into_incoming()
/// .filter_map(Result::ok) /* Ignore failed connections */
/// }
diff --git a/library/std/src/net/tcp/tests.rs b/library/std/src/net/tcp/tests.rs
index e019bc0b6..7a3c66e45 100644
--- a/library/std/src/net/tcp/tests.rs
+++ b/library/std/src/net/tcp/tests.rs
@@ -1,6 +1,7 @@
use crate::fmt;
use crate::io::prelude::*;
-use crate::io::{ErrorKind, IoSlice, IoSliceMut};
+use crate::io::{BorrowedBuf, ErrorKind, IoSlice, IoSliceMut};
+use crate::mem::MaybeUninit;
use crate::net::test::{next_test_ip4, next_test_ip6};
use crate::net::*;
use crate::sync::mpsc::channel;
@@ -280,6 +281,31 @@ fn partial_read() {
}
#[test]
+fn read_buf() {
+ each_ip(&mut |addr| {
+ let srv = t!(TcpListener::bind(&addr));
+ let t = thread::spawn(move || {
+ let mut s = t!(TcpStream::connect(&addr));
+ s.write_all(&[1, 2, 3, 4]).unwrap();
+ });
+
+ let mut s = t!(srv.accept()).0;
+ let mut buf: [MaybeUninit<u8>; 128] = MaybeUninit::uninit_array();
+ let mut buf = BorrowedBuf::from(buf.as_mut_slice());
+ t!(s.read_buf(buf.unfilled()));
+ assert_eq!(buf.filled(), &[1, 2, 3, 4]);
+
+ // FIXME: sgx uses default_read_buf that initializes the buffer.
+ if cfg!(not(target_env = "sgx")) {
+ // TcpStream::read_buf should omit buffer initialization.
+ assert_eq!(buf.init_len(), 4);
+ }
+
+ t.join().ok().expect("thread panicked");
+ })
+}
+
+#[test]
fn read_vectored() {
each_ip(&mut |addr| {
let srv = t!(TcpListener::bind(&addr));
diff --git a/library/std/src/os/android/net.rs b/library/std/src/os/android/net.rs
index 7cecd1bbf..fe40d6319 100644
--- a/library/std/src/os/android/net.rs
+++ b/library/std/src/os/android/net.rs
@@ -1,8 +1,8 @@
//! Android-specific networking functionality.
-#![unstable(feature = "tcp_quickack", issue = "96256")]
+#![stable(feature = "unix_socket_abstract", since = "1.70.0")]
-#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub use crate::os::net::linux_ext::addr::SocketAddrExt;
#[unstable(feature = "tcp_quickack", issue = "96256")]
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
index 99a4e0b51..2180d2974 100644
--- a/library/std/src/os/fd/owned.rs
+++ b/library/std/src/os/fd/owned.rs
@@ -201,7 +201,7 @@ macro_rules! impl_is_terminal {
#[unstable(feature = "sealed", issue = "none")]
impl crate::sealed::Sealed for $t {}
- #[unstable(feature = "is_terminal", issue = "98070")]
+ #[stable(feature = "is_terminal", since = "1.70.0")]
impl crate::io::IsTerminal for $t {
#[inline]
fn is_terminal(&self) -> bool {
@@ -268,7 +268,7 @@ impl AsFd for OwnedFd {
#[inline]
fn as_fd(&self) -> BorrowedFd<'_> {
// Safety: `OwnedFd` and `BorrowedFd` have the same validity
- // invariants, and the `BorrowdFd` is bounded by the lifetime
+ // invariants, and the `BorrowedFd` is bounded by the lifetime
// of `&self`.
unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) }
}
diff --git a/library/std/src/os/linux/net.rs b/library/std/src/os/linux/net.rs
index 94081c8dd..c8e734d74 100644
--- a/library/std/src/os/linux/net.rs
+++ b/library/std/src/os/linux/net.rs
@@ -1,8 +1,8 @@
//! Linux-specific networking functionality.
-#![unstable(feature = "tcp_quickack", issue = "96256")]
+#![stable(feature = "unix_socket_abstract", since = "1.70.0")]
-#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub use crate::os::net::linux_ext::addr::SocketAddrExt;
#[unstable(feature = "tcp_quickack", issue = "96256")]
diff --git a/library/std/src/os/linux/raw.rs b/library/std/src/os/linux/raw.rs
index f46028c3a..c55ca8ba2 100644
--- a/library/std/src/os/linux/raw.rs
+++ b/library/std/src/os/linux/raw.rs
@@ -231,6 +231,7 @@ mod arch {
}
#[cfg(any(
+ target_arch = "loongarch64",
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64",
diff --git a/library/std/src/os/net/linux_ext/addr.rs b/library/std/src/os/net/linux_ext/addr.rs
index 85065984f..aed772056 100644
--- a/library/std/src/os/net/linux_ext/addr.rs
+++ b/library/std/src/os/net/linux_ext/addr.rs
@@ -4,7 +4,7 @@ use crate::os::unix::net::SocketAddr;
use crate::sealed::Sealed;
/// Platform-specific extensions to [`SocketAddr`].
-#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub trait SocketAddrExt: Sealed {
/// Creates a Unix socket address in the abstract namespace.
///
@@ -22,7 +22,6 @@ pub trait SocketAddrExt: Sealed {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixListener, SocketAddr};
/// use std::os::linux::net::SocketAddrExt;
///
@@ -38,6 +37,7 @@ pub trait SocketAddrExt: Sealed {
/// Ok(())
/// }
/// ```
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
fn from_abstract_name<N>(name: N) -> crate::io::Result<SocketAddr>
where
N: AsRef<[u8]>;
@@ -47,7 +47,6 @@ pub trait SocketAddrExt: Sealed {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixListener, SocketAddr};
/// use std::os::linux::net::SocketAddrExt;
///
@@ -60,5 +59,6 @@ pub trait SocketAddrExt: Sealed {
/// Ok(())
/// }
/// ```
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
fn as_abstract_name(&self) -> Option<&[u8]>;
}
diff --git a/library/std/src/os/net/linux_ext/mod.rs b/library/std/src/os/net/linux_ext/mod.rs
index 318ebacfd..62e78cc50 100644
--- a/library/std/src/os/net/linux_ext/mod.rs
+++ b/library/std/src/os/net/linux_ext/mod.rs
@@ -2,7 +2,7 @@
#![doc(cfg(any(target_os = "linux", target_os = "android")))]
-#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub(crate) mod addr;
#[unstable(feature = "tcp_quickack", issue = "96256")]
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index ece2b33bd..6c99e8c36 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -245,12 +245,12 @@ impl SocketAddr {
}
}
-#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
impl Sealed for SocketAddr {}
#[doc(cfg(any(target_os = "android", target_os = "linux")))]
#[cfg(any(doc, target_os = "android", target_os = "linux"))]
-#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+#[stable(feature = "unix_socket_abstract", since = "1.70.0")]
impl linux_ext::addr::SocketAddrExt for SocketAddr {
fn as_abstract_name(&self) -> Option<&[u8]> {
if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None }
diff --git a/library/std/src/os/unix/net/ancillary.rs b/library/std/src/os/unix/net/ancillary.rs
index 7cc901a79..7565fbc0d 100644
--- a/library/std/src/os/unix/net/ancillary.rs
+++ b/library/std/src/os/unix/net/ancillary.rs
@@ -86,7 +86,12 @@ fn add_to_ancillary_data<T>(
cmsg_level: libc::c_int,
cmsg_type: libc::c_int,
) -> bool {
- let source_len = if let Some(source_len) = source.len().checked_mul(size_of::<T>()) {
+ #[cfg(not(target_os = "freebsd"))]
+ let cmsg_size = source.len().checked_mul(size_of::<T>());
+ #[cfg(target_os = "freebsd")]
+ let cmsg_size = Some(unsafe { libc::SOCKCRED2SIZE(1) });
+
+ let source_len = if let Some(source_len) = cmsg_size {
if let Ok(source_len) = u32::try_from(source_len) {
source_len
} else {
@@ -178,7 +183,13 @@ impl<'a, T> Iterator for AncillaryDataIter<'a, T> {
}
}
-#[cfg(all(doc, not(target_os = "android"), not(target_os = "linux"), not(target_os = "netbsd")))]
+#[cfg(all(
+ doc,
+ not(target_os = "android"),
+ not(target_os = "linux"),
+ not(target_os = "netbsd"),
+ not(target_os = "freebsd")
+))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
#[derive(Clone)]
pub struct SocketCred(());
@@ -194,6 +205,11 @@ pub struct SocketCred(libc::ucred);
#[derive(Clone)]
pub struct SocketCred(libc::sockcred);
+#[cfg(target_os = "freebsd")]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+#[derive(Clone)]
+pub struct SocketCred(libc::sockcred2);
+
#[doc(cfg(any(target_os = "android", target_os = "linux")))]
#[cfg(any(target_os = "android", target_os = "linux"))]
impl SocketCred {
@@ -246,6 +262,66 @@ impl SocketCred {
}
}
+#[cfg(target_os = "freebsd")]
+impl SocketCred {
+ /// Create a Unix credential struct.
+ ///
+ /// PID, UID and GID are set to 0.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ #[must_use]
+ pub fn new() -> SocketCred {
+ SocketCred(libc::sockcred2 {
+ sc_version: 0,
+ sc_pid: 0,
+ sc_uid: 0,
+ sc_euid: 0,
+ sc_gid: 0,
+ sc_egid: 0,
+ sc_ngroups: 0,
+ sc_groups: [0; 1],
+ })
+ }
+
+ /// Set the PID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_pid(&mut self, pid: libc::pid_t) {
+ self.0.sc_pid = pid;
+ }
+
+ /// Get the current PID.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_pid(&self) -> libc::pid_t {
+ self.0.sc_pid
+ }
+
+ /// Set the UID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_uid(&mut self, uid: libc::uid_t) {
+ self.0.sc_euid = uid;
+ }
+
+ /// Get the current UID.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_uid(&self) -> libc::uid_t {
+ self.0.sc_euid
+ }
+
+ /// Set the GID.
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn set_gid(&mut self, gid: libc::gid_t) {
+ self.0.sc_egid = gid;
+ }
+
+ /// Get the current GID.
+ #[must_use]
+ #[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+ pub fn get_gid(&self) -> libc::gid_t {
+ self.0.sc_egid
+ }
+}
+
#[cfg(target_os = "netbsd")]
impl SocketCred {
/// Create a Unix credential struct.
@@ -271,6 +347,7 @@ impl SocketCred {
}
/// Get the current PID.
+ #[must_use]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn get_pid(&self) -> libc::pid_t {
self.0.sc_pid
@@ -283,6 +360,7 @@ impl SocketCred {
}
/// Get the current UID.
+ #[must_use]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn get_uid(&self) -> libc::uid_t {
self.0.sc_uid
@@ -295,6 +373,7 @@ impl SocketCred {
}
/// Get the current GID.
+ #[must_use]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn get_gid(&self) -> libc::gid_t {
self.0.sc_gid
@@ -316,7 +395,13 @@ impl<'a> Iterator for ScmRights<'a> {
}
}
-#[cfg(all(doc, not(target_os = "android"), not(target_os = "linux"), not(target_os = "netbsd")))]
+#[cfg(all(
+ doc,
+ not(target_os = "android"),
+ not(target_os = "linux"),
+ not(target_os = "netbsd"),
+ not(target_os = "freebsd")
+))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub struct ScmCredentials<'a>(AncillaryDataIter<'a, ()>);
@@ -327,11 +412,21 @@ pub struct ScmCredentials<'a>(AncillaryDataIter<'a, ()>);
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub struct ScmCredentials<'a>(AncillaryDataIter<'a, libc::ucred>);
+#[cfg(target_os = "freebsd")]
+#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
+pub struct ScmCredentials<'a>(AncillaryDataIter<'a, libc::sockcred2>);
+
#[cfg(target_os = "netbsd")]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub struct ScmCredentials<'a>(AncillaryDataIter<'a, libc::sockcred>);
-#[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+#[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
impl<'a> Iterator for ScmCredentials<'a> {
type Item = SocketCred;
@@ -353,7 +448,13 @@ pub enum AncillaryError {
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub enum AncillaryData<'a> {
ScmRights(ScmRights<'a>),
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
ScmCredentials(ScmCredentials<'a>),
}
@@ -376,7 +477,13 @@ impl<'a> AncillaryData<'a> {
///
/// `data` must contain a valid control message with level `SOL_SOCKET` and type
/// `SCM_CREDENTIALS`, `SCM_CREDS`, or `SCM_CREDS2`.
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
unsafe fn as_credentials(data: &'a [u8]) -> Self {
let ancillary_data_iter = AncillaryDataIter::new(data);
let scm_credentials = ScmCredentials(ancillary_data_iter);
@@ -395,6 +502,8 @@ impl<'a> AncillaryData<'a> {
libc::SCM_RIGHTS => Ok(AncillaryData::as_rights(data)),
#[cfg(any(target_os = "android", target_os = "linux",))]
libc::SCM_CREDENTIALS => Ok(AncillaryData::as_credentials(data)),
+ #[cfg(target_os = "freebsd")]
+ libc::SCM_CREDS2 => Ok(AncillaryData::as_credentials(data)),
#[cfg(target_os = "netbsd")]
libc::SCM_CREDS => Ok(AncillaryData::as_credentials(data)),
cmsg_type => {
@@ -603,12 +712,18 @@ impl<'a> SocketAncillary<'a> {
/// Add credentials to the ancillary data.
///
- /// The function returns `true` if there was enough space in the buffer.
- /// If there was not enough space then no credentials was appended.
+ /// The function returns `true` if there is enough space in the buffer.
+ /// If there is not enough space then no credentials will be appended.
/// Technically, that means this operation adds a control message with the level `SOL_SOCKET`
- /// and type `SCM_CREDENTIALS` or `SCM_CREDS`.
- ///
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ /// and type `SCM_CREDENTIALS`, `SCM_CREDS`, or `SCM_CREDS2`.
+ ///
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn add_creds(&mut self, creds: &[SocketCred]) -> bool {
self.truncated = false;
@@ -617,8 +732,10 @@ impl<'a> SocketAncillary<'a> {
&mut self.length,
creds,
libc::SOL_SOCKET,
- #[cfg(not(target_os = "netbsd"))]
+ #[cfg(not(any(target_os = "netbsd", target_os = "freebsd")))]
libc::SCM_CREDENTIALS,
+ #[cfg(target_os = "freebsd")]
+ libc::SCM_CREDS2,
#[cfg(target_os = "netbsd")]
libc::SCM_CREDS,
)
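
Taken together, the FreeBSD additions slot into the existing ancillary-data flow: build a `SocketCred`, append it with `add_creds` (which now emits `SCM_CREDS2` on FreeBSD), and send it alongside the regular payload. A minimal sketch under the unstable `unix_socket_ancillary_data` feature, with a hypothetical already-connected datagram socket:

```rust
#![feature(unix_socket_ancillary_data)]

use std::io::IoSlice;
use std::os::unix::net::{SocketAncillary, SocketCred, UnixDatagram};

fn send_creds(sock: &UnixDatagram) -> std::io::Result<()> {
    let mut cred = SocketCred::new(); // PID, UID and GID start at 0
    cred.set_pid(std::process::id() as i32);

    let mut ancillary_buffer = [0; 128];
    let mut ancillary = SocketAncillary::new(&mut ancillary_buffer[..]);
    // `add_creds` returns false (appending nothing) if 128 bytes is too small.
    assert!(ancillary.add_creds(&[cred]));

    sock.send_vectored_with_ancillary(&[IoSlice::new(b"hello")], &mut ancillary)?;
    Ok(())
}
```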
diff --git a/library/std/src/os/unix/net/datagram.rs b/library/std/src/os/unix/net/datagram.rs
index 272b4f5dc..34db54235 100644
--- a/library/std/src/os/unix/net/datagram.rs
+++ b/library/std/src/os/unix/net/datagram.rs
@@ -102,7 +102,6 @@ impl UnixDatagram {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixDatagram};
///
/// fn main() -> std::io::Result<()> {
@@ -119,7 +118,7 @@ impl UnixDatagram {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixDatagram> {
unsafe {
let socket = UnixDatagram::unbound()?;
@@ -217,7 +216,6 @@ impl UnixDatagram {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixDatagram};
///
/// fn main() -> std::io::Result<()> {
@@ -235,7 +233,7 @@ impl UnixDatagram {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub fn connect_addr(&self, socket_addr: &SocketAddr) -> io::Result<()> {
unsafe {
cvt(libc::connect(
@@ -523,7 +521,6 @@ impl UnixDatagram {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixDatagram};
///
/// fn main() -> std::io::Result<()> {
@@ -535,7 +532,7 @@ impl UnixDatagram {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub fn send_to_addr(&self, buf: &[u8], socket_addr: &SocketAddr) -> io::Result<usize> {
unsafe {
let count = cvt(libc::sendto(
@@ -811,8 +808,24 @@ impl UnixDatagram {
///
/// # Examples
///
- #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
- #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ #[cfg_attr(
+ any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd",
+ ),
+ doc = "```no_run"
+ )]
+ #[cfg_attr(
+ not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ )),
+ doc = "```ignore"
+ )]
/// #![feature(unix_socket_ancillary_data)]
/// use std::os::unix::net::UnixDatagram;
///
@@ -822,7 +835,13 @@ impl UnixDatagram {
/// Ok(())
/// }
/// ```
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
self.0.set_passcred(passcred)
@@ -834,7 +853,13 @@ impl UnixDatagram {
/// Get the socket option `SO_PASSCRED`.
///
/// [`set_passcred`]: UnixDatagram::set_passcred
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn passcred(&self) -> io::Result<bool> {
self.0.passcred()
diff --git a/library/std/src/os/unix/net/listener.rs b/library/std/src/os/unix/net/listener.rs
index 02090afc8..5be8aebc7 100644
--- a/library/std/src/os/unix/net/listener.rs
+++ b/library/std/src/os/unix/net/listener.rs
@@ -90,7 +90,6 @@ impl UnixListener {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixListener};
///
/// fn main() -> std::io::Result<()> {
@@ -107,7 +106,7 @@ impl UnixListener {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub fn bind_addr(socket_addr: &SocketAddr) -> io::Result<UnixListener> {
unsafe {
let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
diff --git a/library/std/src/os/unix/net/stream.rs b/library/std/src/os/unix/net/stream.rs
index dff8f6e85..bf2a51b5e 100644
--- a/library/std/src/os/unix/net/stream.rs
+++ b/library/std/src/os/unix/net/stream.rs
@@ -106,7 +106,6 @@ impl UnixStream {
/// # Examples
///
/// ```no_run
- /// #![feature(unix_socket_abstract)]
/// use std::os::unix::net::{UnixListener, UnixStream};
///
/// fn main() -> std::io::Result<()> {
@@ -123,7 +122,7 @@ impl UnixStream {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
+ #[stable(feature = "unix_socket_abstract", since = "1.70.0")]
pub fn connect_addr(socket_addr: &SocketAddr) -> io::Result<UnixStream> {
unsafe {
let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?;
@@ -398,8 +397,24 @@ impl UnixStream {
///
/// # Examples
///
- #[cfg_attr(any(target_os = "android", target_os = "linux"), doc = "```no_run")]
- #[cfg_attr(not(any(target_os = "android", target_os = "linux")), doc = "```ignore")]
+ #[cfg_attr(
+ any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ),
+ doc = "```no_run"
+ )]
+ #[cfg_attr(
+ not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ )),
+ doc = "```ignore"
+ )]
/// #![feature(unix_socket_ancillary_data)]
/// use std::os::unix::net::UnixStream;
///
@@ -409,7 +424,13 @@ impl UnixStream {
/// Ok(())
/// }
/// ```
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
self.0.set_passcred(passcred)
@@ -421,7 +442,13 @@ impl UnixStream {
/// Get the socket option `SO_PASSCRED`.
///
/// [`set_passcred`]: UnixStream::set_passcred
- #[cfg(any(doc, target_os = "android", target_os = "linux", target_os = "netbsd",))]
+ #[cfg(any(
+ doc,
+ target_os = "android",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "freebsd"
+ ))]
#[unstable(feature = "unix_socket_ancillary_data", issue = "76915")]
pub fn passcred(&self) -> io::Result<bool> {
self.0.passcred()
diff --git a/library/std/src/os/unix/net/tests.rs b/library/std/src/os/unix/net/tests.rs
index f8c29a6d3..39f10c50d 100644
--- a/library/std/src/os/unix/net/tests.rs
+++ b/library/std/src/os/unix/net/tests.rs
@@ -646,7 +646,7 @@ fn test_send_vectored_fds_unix_stream() {
}
}
-#[cfg(any(target_os = "android", target_os = "linux",))]
+#[cfg(any(target_os = "android", target_os = "linux", target_os = "freebsd"))]
#[test]
fn test_send_vectored_with_ancillary_to_unix_datagram() {
fn getpid() -> libc::pid_t {
diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs
index 1dfecc573..50410fcdf 100644
--- a/library/std/src/os/windows/io/handle.rs
+++ b/library/std/src/os/windows/io/handle.rs
@@ -389,7 +389,7 @@ macro_rules! impl_is_terminal {
#[unstable(feature = "sealed", issue = "none")]
impl crate::sealed::Sealed for $t {}
- #[unstable(feature = "is_terminal", issue = "98070")]
+ #[stable(feature = "is_terminal", since = "1.70.0")]
impl crate::io::IsTerminal for $t {
#[inline]
fn is_terminal(&self) -> bool {
diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
index 9fa8f5702..345d72ef8 100644
--- a/library/std/src/panic.rs
+++ b/library/std/src/panic.rs
@@ -308,8 +308,7 @@ pub fn get_backtrace_style() -> Option<BacktraceStyle> {
BacktraceStyle::Short
}
})
- .unwrap_or(if cfg!(target_os = "fuchsia") {
- // Fuchsia components default to full backtrace.
+ .unwrap_or(if crate::sys::FULL_BACKTRACE_DEFAULT {
BacktraceStyle::Full
} else {
BacktraceStyle::Off
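
This replaces the hard-coded Fuchsia check with a constant each `sys` backend supplies. The per-platform definitions are not part of this hunk; a sketch of what they presumably look like, using only the constant name taken from the diff:

```rust
// Illustrative only -- the real definitions live in the platform `sys` modules.
#[cfg(target_os = "fuchsia")]
pub const FULL_BACKTRACE_DEFAULT: bool = true; // Fuchsia components default to full backtraces
#[cfg(not(target_os = "fuchsia"))]
pub const FULL_BACKTRACE_DEFAULT: bool = false;
```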
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index e59f32af7..a46a29cba 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -46,12 +46,10 @@ extern "C" {
fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any + Send + 'static);
}
-#[allow(improper_ctypes)]
extern "Rust" {
- /// `payload` is passed through another layer of raw pointers as `&mut dyn Trait` is not
- /// FFI-safe. `BoxMeUp` lazily performs allocation only when needed (this avoids allocations
- /// when using the "abort" panic runtime).
- fn __rust_start_panic(payload: *mut &mut dyn BoxMeUp) -> u32;
+ /// `BoxMeUp` lazily performs allocation only when needed (this avoids
+ /// allocations when using the "abort" panic runtime).
+ fn __rust_start_panic(payload: &mut dyn BoxMeUp) -> u32;
}
/// This function is called by the panic runtime if FFI code catches a Rust
@@ -500,6 +498,7 @@ pub unsafe fn r#try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>>
// This function cannot be marked as `unsafe` because `intrinsics::r#try`
// expects normal function pointers.
#[inline]
+ #[rustc_nounwind] // `intrinsic::r#try` requires catch fn to be nounwind
fn do_catch<F: FnOnce() -> R, R>(data: *mut u8, payload: *mut u8) {
// SAFETY: this is the responsibility of the caller, see above.
//
@@ -738,10 +737,7 @@ pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! {
/// yer breakpoints.
#[inline(never)]
#[cfg_attr(not(test), rustc_std_internal_symbol)]
-fn rust_panic(mut msg: &mut dyn BoxMeUp) -> ! {
- let code = unsafe {
- let obj = &mut msg as *mut &mut dyn BoxMeUp;
- __rust_start_panic(obj)
- };
+fn rust_panic(msg: &mut dyn BoxMeUp) -> ! {
+ let code = unsafe { __rust_start_panic(msg) };
rtabort!("failed to initiate panic, error {code}")
}
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index cd6b393a2..b3d883de0 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -78,7 +78,7 @@ use crate::fmt;
use crate::fs;
use crate::hash::{Hash, Hasher};
use crate::io;
-use crate::iter::{self, FusedIterator};
+use crate::iter::FusedIterator;
use crate::ops::{self, Deref};
use crate::rc::Rc;
use crate::str::FromStr;
@@ -450,26 +450,26 @@ impl<'a> PrefixComponent<'a> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> cmp::PartialEq for PrefixComponent<'a> {
+impl<'a> PartialEq for PrefixComponent<'a> {
#[inline]
fn eq(&self, other: &PrefixComponent<'a>) -> bool {
- cmp::PartialEq::eq(&self.parsed, &other.parsed)
+ self.parsed == other.parsed
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> cmp::PartialOrd for PrefixComponent<'a> {
+impl<'a> PartialOrd for PrefixComponent<'a> {
#[inline]
fn partial_cmp(&self, other: &PrefixComponent<'a>) -> Option<cmp::Ordering> {
- cmp::PartialOrd::partial_cmp(&self.parsed, &other.parsed)
+ PartialOrd::partial_cmp(&self.parsed, &other.parsed)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Ord for PrefixComponent<'_> {
+impl Ord for PrefixComponent<'_> {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
- cmp::Ord::cmp(&self.parsed, &other.parsed)
+ Ord::cmp(&self.parsed, &other.parsed)
}
}
@@ -988,7 +988,7 @@ impl<'a> DoubleEndedIterator for Components<'a> {
impl FusedIterator for Components<'_> {}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> cmp::PartialEq for Components<'a> {
+impl<'a> PartialEq for Components<'a> {
#[inline]
fn eq(&self, other: &Components<'a>) -> bool {
let Components { path: _, front: _, back: _, has_physical_root: _, prefix: _ } = self;
@@ -1015,10 +1015,10 @@ impl<'a> cmp::PartialEq for Components<'a> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Eq for Components<'_> {}
+impl Eq for Components<'_> {}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a> cmp::PartialOrd for Components<'a> {
+impl<'a> PartialOrd for Components<'a> {
#[inline]
fn partial_cmp(&self, other: &Components<'a>) -> Option<cmp::Ordering> {
Some(compare_components(self.clone(), other.clone()))
@@ -1026,7 +1026,7 @@ impl<'a> cmp::PartialOrd for Components<'a> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Ord for Components<'_> {
+impl Ord for Components<'_> {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
compare_components(self.clone(), other.clone())
@@ -1498,7 +1498,6 @@ impl PathBuf {
/// # Examples
///
/// ```
- /// #![feature(path_as_mut_os_str)]
/// use std::path::{Path, PathBuf};
///
/// let mut path = PathBuf::from("/foo");
@@ -1510,7 +1509,7 @@ impl PathBuf {
/// path.as_mut_os_string().push("baz");
/// assert_eq!(path, Path::new("/foo/barbaz"));
/// ```
- #[unstable(feature = "path_as_mut_os_str", issue = "105021")]
+ #[stable(feature = "path_as_mut_os_str", since = "1.70.0")]
#[must_use]
#[inline]
pub fn as_mut_os_string(&mut self) -> &mut OsString {
@@ -1742,7 +1741,7 @@ impl FromStr for PathBuf {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<P: AsRef<Path>> iter::FromIterator<P> for PathBuf {
+impl<P: AsRef<Path>> FromIterator<P> for PathBuf {
fn from_iter<I: IntoIterator<Item = P>>(iter: I) -> PathBuf {
let mut buf = PathBuf::new();
buf.extend(iter);
@@ -1751,7 +1750,7 @@ impl<P: AsRef<Path>> iter::FromIterator<P> for PathBuf {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<P: AsRef<Path>> iter::Extend<P> for PathBuf {
+impl<P: AsRef<Path>> Extend<P> for PathBuf {
fn extend<I: IntoIterator<Item = P>>(&mut self, iter: I) {
iter.into_iter().for_each(move |p| self.push(p.as_ref()));
}
@@ -1905,7 +1904,7 @@ impl ToOwned for Path {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::PartialEq for PathBuf {
+impl PartialEq for PathBuf {
#[inline]
fn eq(&self, other: &PathBuf) -> bool {
self.components() == other.components()
@@ -1920,10 +1919,10 @@ impl Hash for PathBuf {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Eq for PathBuf {}
+impl Eq for PathBuf {}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::PartialOrd for PathBuf {
+impl PartialOrd for PathBuf {
#[inline]
fn partial_cmp(&self, other: &PathBuf) -> Option<cmp::Ordering> {
Some(compare_components(self.components(), other.components()))
@@ -1931,7 +1930,7 @@ impl cmp::PartialOrd for PathBuf {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Ord for PathBuf {
+impl Ord for PathBuf {
#[inline]
fn cmp(&self, other: &PathBuf) -> cmp::Ordering {
compare_components(self.components(), other.components())
@@ -2066,7 +2065,6 @@ impl Path {
/// # Examples
///
/// ```
- /// #![feature(path_as_mut_os_str)]
/// use std::path::{Path, PathBuf};
///
/// let mut path = PathBuf::from("Foo.TXT");
@@ -2076,7 +2074,7 @@ impl Path {
/// path.as_mut_os_str().make_ascii_lowercase();
/// assert_eq!(path, Path::new("foo.txt"));
/// ```
- #[unstable(feature = "path_as_mut_os_str", issue = "105021")]
+ #[stable(feature = "path_as_mut_os_str", since = "1.70.0")]
#[must_use]
#[inline]
pub fn as_mut_os_str(&mut self) -> &mut OsStr {
@@ -3027,7 +3025,7 @@ impl fmt::Display for Display<'_> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::PartialEq for Path {
+impl PartialEq for Path {
#[inline]
fn eq(&self, other: &Path) -> bool {
self.components() == other.components()
@@ -3086,10 +3084,10 @@ impl Hash for Path {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Eq for Path {}
+impl Eq for Path {}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::PartialOrd for Path {
+impl PartialOrd for Path {
#[inline]
fn partial_cmp(&self, other: &Path) -> Option<cmp::Ordering> {
Some(compare_components(self.components(), other.components()))
@@ -3097,7 +3095,7 @@ impl cmp::PartialOrd for Path {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl cmp::Ord for Path {
+impl Ord for Path {
#[inline]
fn cmp(&self, other: &Path) -> cmp::Ordering {
compare_components(self.components(), other.components())
diff --git a/library/std/src/personality/gcc.rs b/library/std/src/personality/gcc.rs
index 41c0fe725..0421b47be 100644
--- a/library/std/src/personality/gcc.rs
+++ b/library/std/src/personality/gcc.rs
@@ -77,6 +77,9 @@ const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1
#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
const UNWIND_DATA_REG: (i32, i32) = (10, 11); // x10, x11
+#[cfg(target_arch = "loongarch64")]
+const UNWIND_DATA_REG: (i32, i32) = (4, 5); // a0, a1
+
// The following code is based on GCC's C and C++ personality routines. For reference, see:
// https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc
// https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c
diff --git a/library/std/src/prelude/mod.rs b/library/std/src/prelude/mod.rs
index c314bbbb6..1b29c887d 100644
--- a/library/std/src/prelude/mod.rs
+++ b/library/std/src/prelude/mod.rs
@@ -34,7 +34,7 @@
//! marker traits that indicate fundamental properties of types.
//! * <code>[std::ops]::{[Drop], [Fn], [FnMut], [FnOnce]}</code>, various
//! operations for both destructors and overloading `()`.
-//! * <code>[std::mem]::[drop][mem::drop]</code>, a convenience function for explicitly
+//! * <code>[std::mem]::[drop]</code>, a convenience function for explicitly
//! dropping a value.
//! * <code>[std::boxed]::[Box]</code>, a way to allocate values on the heap.
//! * <code>[std::borrow]::[ToOwned]</code>, the conversion trait that defines
@@ -66,7 +66,6 @@
//! * <code>[std::convert]::{[TryFrom], [TryInto]}</code>,
//! * <code>[std::iter]::[FromIterator]</code>.
//!
-//! [mem::drop]: crate::mem::drop
//! [std::borrow]: crate::borrow
//! [std::boxed]: crate::boxed
//! [std::clone]: crate::clone
@@ -86,9 +85,6 @@
//! [std::slice]: crate::slice
//! [std::string]: crate::string
//! [std::vec]: mod@crate::vec
-//! [TryFrom]: crate::convert::TryFrom
-//! [TryInto]: crate::convert::TryInto
-//! [FromIterator]: crate::iter::FromIterator
//! [`to_owned`]: crate::borrow::ToOwned::to_owned
//! [book-closures]: ../../book/ch13-01-closures.html
//! [book-dtor]: ../../book/ch15-03-drop.html
diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs
index 6f78811a1..3df990e5d 100644
--- a/library/std/src/primitive_docs.rs
+++ b/library/std/src/primitive_docs.rs
@@ -1,7 +1,8 @@
// `library/{std,core}/src/primitive_docs.rs` should have the same contents.
// These are different files so that relative links work properly without
// having to have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
-#[doc(primitive = "bool")]
+#[cfg_attr(bootstrap, doc(primitive = "bool"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "bool")]
#[doc(alias = "true")]
#[doc(alias = "false")]
/// The boolean type.
@@ -63,7 +64,8 @@
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_bool {}
-#[doc(primitive = "never")]
+#[cfg_attr(bootstrap, doc(primitive = "never"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "never")]
#[doc(alias = "!")]
//
/// The `!` type, also called "never".
@@ -274,7 +276,8 @@ mod prim_bool {}
#[unstable(feature = "never_type", issue = "35121")]
mod prim_never {}
-#[doc(primitive = "char")]
+#[cfg_attr(bootstrap, doc(primitive = "char"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "char")]
#[allow(rustdoc::invalid_rust_codeblocks)]
/// A character type.
///
@@ -398,7 +401,8 @@ mod prim_never {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_char {}
-#[doc(primitive = "unit")]
+#[cfg_attr(bootstrap, doc(primitive = "unit"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "unit")]
#[doc(alias = "(")]
#[doc(alias = ")")]
#[doc(alias = "()")]
@@ -460,7 +464,8 @@ impl Copy for () {
// empty
}
-#[doc(primitive = "pointer")]
+#[cfg_attr(bootstrap, doc(primitive = "pointer"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "pointer")]
#[doc(alias = "ptr")]
#[doc(alias = "*")]
#[doc(alias = "*const")]
@@ -572,12 +577,12 @@ impl Copy for () {
/// [`is_null`]: pointer::is_null
/// [`offset`]: pointer::offset
#[doc = concat!("[`into_raw`]: ", include_str!("../primitive_docs/box_into_raw.md"))]
-/// [`drop`]: mem::drop
/// [`write`]: ptr::write
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_pointer {}
-#[doc(primitive = "array")]
+#[cfg_attr(bootstrap, doc(primitive = "array"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "array")]
#[doc(alias = "[]")]
#[doc(alias = "[T;N]")] // unfortunately, rustdoc doesn't have fuzzy search for aliases
#[doc(alias = "[T; N]")]
@@ -778,7 +783,8 @@ mod prim_pointer {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_array {}
-#[doc(primitive = "slice")]
+#[cfg_attr(bootstrap, doc(primitive = "slice"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "slice")]
#[doc(alias = "[")]
#[doc(alias = "]")]
#[doc(alias = "[]")]
@@ -870,7 +876,8 @@ mod prim_array {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_slice {}
-#[doc(primitive = "str")]
+#[cfg_attr(bootstrap, doc(primitive = "str"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "str")]
/// String slices.
///
/// *[See also the `std::str` module](crate::str).*
@@ -937,7 +944,8 @@ mod prim_slice {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_str {}
-#[doc(primitive = "tuple")]
+#[cfg_attr(bootstrap, doc(primitive = "tuple"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "tuple")]
#[doc(alias = "(")]
#[doc(alias = ")")]
#[doc(alias = "()")]
@@ -1017,7 +1025,6 @@ mod prim_str {}
/// * [`UnwindSafe`]
/// * [`RefUnwindSafe`]
///
-/// [`Unpin`]: marker::Unpin
/// [`UnwindSafe`]: panic::UnwindSafe
/// [`RefUnwindSafe`]: panic::RefUnwindSafe
///
@@ -1081,7 +1088,8 @@ impl<T: Copy> Copy for (T,) {
// empty
}
-#[doc(primitive = "f32")]
+#[cfg_attr(bootstrap, doc(primitive = "f32"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "f32")]
/// A 32-bit floating point type (specifically, the "binary32" type defined in IEEE 754-2008).
///
/// This type can represent a wide range of decimal numbers, like `3.5`, `27`,
@@ -1110,7 +1118,7 @@ impl<T: Copy> Copy for (T,) {
/// - [NaN (not a number)](#associatedconstant.NAN): this value results from
/// calculations like `(-1.0).sqrt()`. NaN has some potentially unexpected
/// behavior:
-/// - It is unequal to any float, including itself! This is the reason `f32`
+/// - It is not equal to any float, including itself! This is the reason `f32`
/// doesn't implement the `Eq` trait.
/// - It is also neither smaller nor greater than any float, making it
/// impossible to sort by the default comparison operation, which is the
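
The reworded NaN bullet is easy to check directly; a small demonstration of why `f32` implements neither `Eq` nor `Ord`:

```rust
fn main() {
    let nan = f32::NAN;
    assert!(nan != nan); // not equal to any float, including itself
    assert_eq!(nan.partial_cmp(&nan), None); // neither smaller nor greater
    // Sorting therefore needs a total order such as `total_cmp`:
    let mut v = [2.0f32, f32::NAN, 1.0];
    v.sort_by(f32::total_cmp);
}
```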
@@ -1147,7 +1155,8 @@ impl<T: Copy> Copy for (T,) {
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_f32 {}
-#[doc(primitive = "f64")]
+#[cfg_attr(bootstrap, doc(primitive = "f64"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "f64")]
/// A 64-bit floating point type (specifically, the "binary64" type defined in IEEE 754-2008).
///
/// This type is very similar to [`f32`], but has increased
@@ -1162,67 +1171,78 @@ mod prim_f32 {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_f64 {}
-#[doc(primitive = "i8")]
+#[cfg_attr(bootstrap, doc(primitive = "i8"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i8")]
//
/// The 8-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i8 {}
-#[doc(primitive = "i16")]
+#[cfg_attr(bootstrap, doc(primitive = "i16"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i16")]
//
/// The 16-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i16 {}
-#[doc(primitive = "i32")]
+#[cfg_attr(bootstrap, doc(primitive = "i32"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i32")]
//
/// The 32-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i32 {}
-#[doc(primitive = "i64")]
+#[cfg_attr(bootstrap, doc(primitive = "i64"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i64")]
//
/// The 64-bit signed integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_i64 {}
-#[doc(primitive = "i128")]
+#[cfg_attr(bootstrap, doc(primitive = "i128"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "i128")]
//
/// The 128-bit signed integer type.
#[stable(feature = "i128", since = "1.26.0")]
mod prim_i128 {}
-#[doc(primitive = "u8")]
+#[cfg_attr(bootstrap, doc(primitive = "u8"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u8")]
//
/// The 8-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u8 {}
-#[doc(primitive = "u16")]
+#[cfg_attr(bootstrap, doc(primitive = "u16"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u16")]
//
/// The 16-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u16 {}
-#[doc(primitive = "u32")]
+#[cfg_attr(bootstrap, doc(primitive = "u32"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u32")]
//
/// The 32-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u32 {}
-#[doc(primitive = "u64")]
+#[cfg_attr(bootstrap, doc(primitive = "u64"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u64")]
//
/// The 64-bit unsigned integer type.
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_u64 {}
-#[doc(primitive = "u128")]
+#[cfg_attr(bootstrap, doc(primitive = "u128"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "u128")]
//
/// The 128-bit unsigned integer type.
#[stable(feature = "i128", since = "1.26.0")]
mod prim_u128 {}
-#[doc(primitive = "isize")]
+#[cfg_attr(bootstrap, doc(primitive = "isize"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "isize")]
//
/// The pointer-sized signed integer type.
///
@@ -1232,7 +1252,8 @@ mod prim_u128 {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_isize {}
-#[doc(primitive = "usize")]
+#[cfg_attr(bootstrap, doc(primitive = "usize"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "usize")]
//
/// The pointer-sized unsigned integer type.
///
@@ -1242,7 +1263,8 @@ mod prim_isize {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_usize {}
-#[doc(primitive = "reference")]
+#[cfg_attr(bootstrap, doc(primitive = "reference"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "reference")]
#[doc(alias = "&")]
#[doc(alias = "&mut")]
//
@@ -1338,6 +1360,7 @@ mod prim_usize {}
/// * [`Hash`]
/// * [`ToSocketAddrs`]
/// * [`Send`] \(`&T` references also require <code>T: [Sync]</code>)
+/// * [`Sync`]
///
/// [`std::fmt`]: fmt
/// [`Hash`]: hash::Hash
@@ -1373,16 +1396,13 @@ mod prim_usize {}
#[stable(feature = "rust1", since = "1.0.0")]
mod prim_ref {}
-#[doc(primitive = "fn")]
+#[cfg_attr(bootstrap, doc(primitive = "fn"))]
+#[cfg_attr(not(bootstrap), rustc_doc_primitive = "fn")]
//
/// Function pointers, like `fn(usize) -> bool`.
///
/// *See also the traits [`Fn`], [`FnMut`], and [`FnOnce`].*
///
-/// [`Fn`]: ops::Fn
-/// [`FnMut`]: ops::FnMut
-/// [`FnOnce`]: ops::FnOnce
-///
/// Function pointers are pointers that point to *code*, not data. They can be called
/// just like functions. Like references, function pointers are, among other things, assumed to
/// not be null, so if you want to pass a function pointer over FFI and be able to accommodate null
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index 1952e19e6..0ab72f7ea 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -110,7 +110,7 @@ use crate::convert::Infallible;
use crate::ffi::OsStr;
use crate::fmt;
use crate::fs;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::num::NonZeroI32;
use crate::path::Path;
use crate::str;
@@ -354,6 +354,10 @@ impl Read for ChildStdout {
self.inner.read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.read_buf(buf)
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.read_vectored(bufs)
}
@@ -419,6 +423,10 @@ impl Read for ChildStderr {
self.inner.read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.read_buf(buf)
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.read_vectored(bufs)
}
@@ -644,10 +652,19 @@ impl Command {
self
}
- /// Inserts or updates an environment variable mapping.
+ /// Inserts or updates an explicit environment variable mapping.
///
- /// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
- /// and case-sensitive on all other platforms.
+ /// This method allows you to add an environment variable mapping to the spawned process or
+ /// overwrite a previously set value. You can use [`Command::envs`] to set multiple environment
+ /// variables simultaneously.
+ ///
+ /// Child processes will inherit environment variables from their parent process by default.
+ /// Environment variables explicitly set using [`Command::env`] take precedence over inherited
+ /// variables. You can disable environment variable inheritance entirely using
+ /// [`Command::env_clear`] or for a single key using [`Command::env_remove`].
+ ///
+ /// Note that environment variable names are case-insensitive (but
+ /// case-preserving) on Windows and case-sensitive on all other platforms.
///
/// # Examples
///
@@ -671,7 +688,19 @@ impl Command {
self
}
- /// Adds or updates multiple environment variable mappings.
+ /// Inserts or updates multiple explicit environment variable mappings.
+ ///
+ /// This method allows you to add multiple environment variable mappings to the spawned process
+ /// or overwrite previously set values. You can use [`Command::env`] to set a single environment
+ /// variable.
+ ///
+ /// Child processes will inherit environment variables from their parent process by default.
+ /// Environment variables explicitly set using [`Command::envs`] take precedence over inherited
+ /// variables. You can disable environment variable inheritance entirely using
+ /// [`Command::env_clear`] or for a single key using [`Command::env_remove`].
+ ///
+ /// Note that environment variable names are case-insensitive (but case-preserving) on Windows
+ /// and case-sensitive on all other platforms.
///
/// # Examples
///
@@ -708,7 +737,18 @@ impl Command {
self
}
- /// Removes an environment variable mapping.
+ /// Removes an explicitly set environment variable and prevents inheriting it from a parent
+ /// process.
+ ///
+ /// This method will remove the explicit value of an environment variable set via
+ /// [`Command::env`] or [`Command::envs`]. In addition, it will prevent the spawned child
+ /// process from inheriting that environment variable from its parent process.
+ ///
+ /// After calling [`Command::env_remove`], the value associated with its key from
+ /// [`Command::get_envs`] will be [`None`].
+ ///
+ /// To clear all explicitly set environment variables and disable all environment variable
+ /// inheritance, you can use [`Command::env_clear`].
///
/// # Examples
///
@@ -728,7 +768,17 @@ impl Command {
self
}
- /// Clears the entire environment map for the child process.
+ /// Clears all explicitly set environment variables and prevents inheriting any parent process
+ /// environment variables.
+ ///
+ /// This method will remove all explicitly added environment variables set via [`Command::env`]
+ /// or [`Command::envs`]. In addition, it will prevent the spawned child process from inheriting
+ /// any environment variable from its parent process.
+ ///
+ /// After calling [`Command::env_clear`], the iterator from [`Command::get_envs`] will be
+ /// empty.
+ ///
+ /// You can use [`Command::env_remove`] to clear a single mapping.
///
/// # Examples
///
@@ -980,17 +1030,21 @@ impl Command {
CommandArgs { inner: self.inner.get_args() }
}
- /// Returns an iterator of the environment variables that will be set when
- /// the process is spawned.
+ /// Returns an iterator of the environment variables explicitly set for the child process.
+ ///
+ /// Environment variables explicitly set using [`Command::env`], [`Command::envs`], and
+ /// [`Command::env_remove`] can be retrieved with this method.
+ ///
+ /// Note that this output does not include environment variables inherited from the parent
+ /// process.
///
- /// Each element is a tuple `(&OsStr, Option<&OsStr>)`, where the first
- /// value is the key, and the second is the value, which is [`None`] if
- /// the environment variable is to be explicitly removed.
+ /// Each element is a key/value pair `(&OsStr, Option<&OsStr>)`. A [`None`] value
+ /// indicates that its key was explicitly removed via [`Command::env_remove`]; the child
+ /// process will not inherit that variable from its parent.
///
- /// This only includes environment variables explicitly set with
- /// [`Command::env`], [`Command::envs`], and [`Command::env_remove`]. It
- /// does not include environment variables that will be inherited by the
- /// child process.
+ /// An empty iterator can indicate that no explicit mappings were added or that
+ /// [`Command::env_clear`] was called. After calling [`Command::env_clear`], the child process
+ /// will not inherit any environment variables from its parent process.
///
/// # Examples
///
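
The rewritten docs pin `get_envs` down as a view of the explicit map only. A minimal sketch of the described behavior using stable APIs:

```rust
use std::process::Command;

fn main() {
    let mut cmd = Command::new("printenv");
    cmd.env("FOO", "bar").env_remove("TZ");

    // Only the explicit mappings appear: FOO -> Some("bar") and
    // TZ -> None (removed, and blocked from being inherited).
    for (key, value) in cmd.get_envs() {
        println!("{key:?} -> {value:?}");
    }
    assert_eq!(cmd.get_envs().count(), 2);
}
```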
diff --git a/library/std/src/process/tests.rs b/library/std/src/process/tests.rs
index b4f6cc2da..d7f4d335d 100644
--- a/library/std/src/process/tests.rs
+++ b/library/std/src/process/tests.rs
@@ -1,7 +1,8 @@
use crate::io::prelude::*;
use super::{Command, Output, Stdio};
-use crate::io::ErrorKind;
+use crate::io::{BorrowedBuf, ErrorKind};
+use crate::mem::MaybeUninit;
use crate::str;
fn known_command() -> Command {
@@ -121,6 +122,37 @@ fn stdin_works() {
#[test]
#[cfg_attr(any(target_os = "vxworks"), ignore)]
+fn child_stdout_read_buf() {
+ let mut cmd = if cfg!(target_os = "windows") {
+ let mut cmd = Command::new("cmd");
+ cmd.arg("/C").arg("echo abc");
+ cmd
+ } else {
+ let mut cmd = shell_cmd();
+ cmd.arg("-c").arg("echo abc");
+ cmd
+ };
+ cmd.stdin(Stdio::null());
+ cmd.stdout(Stdio::piped());
+ let child = cmd.spawn().unwrap();
+
+ let mut stdout = child.stdout.unwrap();
+ let mut buf: [MaybeUninit<u8>; 128] = MaybeUninit::uninit_array();
+ let mut buf = BorrowedBuf::from(buf.as_mut_slice());
+ stdout.read_buf(buf.unfilled()).unwrap();
+
+ // ChildStdout::read_buf should omit buffer initialization.
+ if cfg!(target_os = "windows") {
+ assert_eq!(buf.filled(), b"abc\r\n");
+ assert_eq!(buf.init_len(), 5);
+ } else {
+ assert_eq!(buf.filled(), b"abc\n");
+ assert_eq!(buf.init_len(), 4);
+ };
+}
+
+#[test]
+#[cfg_attr(any(target_os = "vxworks"), ignore)]
fn test_process_status() {
let mut status = if cfg!(target_os = "windows") {
Command::new("cmd").args(&["/C", "exit 1"]).status().unwrap()
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
index 7e85d6a06..8e9ea293c 100644
--- a/library/std/src/sync/lazy_lock.rs
+++ b/library/std/src/sync/lazy_lock.rs
@@ -26,7 +26,7 @@ union Data<T, F> {
/// # Examples
///
/// ```
-/// #![feature(once_cell)]
+/// #![feature(lazy_cell)]
///
/// use std::collections::HashMap;
///
@@ -54,7 +54,7 @@ union Data<T, F> {
/// // Some("Hoyten")
/// }
/// ```
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
pub struct LazyLock<T, F = fn() -> T> {
once: Once,
data: UnsafeCell<Data<T, F>>,
@@ -64,7 +64,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
/// Creates a new lazy value with the given initializing
/// function.
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[unstable(feature = "lazy_cell", issue = "109736")]
pub const fn new(f: F) -> LazyLock<T, F> {
LazyLock { once: Once::new(), data: UnsafeCell::new(Data { f: ManuallyDrop::new(f) }) }
}
@@ -76,7 +76,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
+ /// #![feature(lazy_cell)]
///
/// use std::sync::LazyLock;
///
@@ -86,7 +86,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
/// assert_eq!(&*lazy, &92);
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[unstable(feature = "lazy_cell", issue = "109736")]
pub fn force(this: &LazyLock<T, F>) -> &T {
this.once.call_once(|| {
// SAFETY: `call_once` only runs this closure once, ever.
@@ -122,7 +122,7 @@ impl<T, F> LazyLock<T, F> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T, F> Drop for LazyLock<T, F> {
fn drop(&mut self) {
match self.once.state() {
@@ -135,7 +135,7 @@ impl<T, F> Drop for LazyLock<T, F> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
type Target = T;
@@ -145,7 +145,7 @@ impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: Default> Default for LazyLock<T> {
/// Creates a new lazy value using `Default` as the initializing function.
#[inline]
@@ -154,7 +154,7 @@ impl<T: Default> Default for LazyLock<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.get() {
@@ -166,13 +166,13 @@ impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> {
// We never create a `&F` from a `&LazyLock<T, F>` so it is fine
// to not impl `Sync` for `F`
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
unsafe impl<T: Sync + Send, F: Send> Sync for LazyLock<T, F> {}
// auto-derived `Send` impl is OK.
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: RefUnwindSafe + UnwindSafe, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
impl<T: UnwindSafe, F: UnwindSafe> UnwindSafe for LazyLock<T, F> {}
#[cfg(test)]
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
index 4edc95617..f6a7c0a9f 100644
--- a/library/std/src/sync/mod.rs
+++ b/library/std/src/sync/mod.rs
@@ -133,7 +133,9 @@
//! - [`Mutex`]: Mutual Exclusion mechanism, which ensures that at
//! most one thread at a time is able to access some data.
//!
-//! - [`Once`]: Used for thread-safe, one-time initialization of a
+//! - [`Once`]: Used for a thread-safe, one-time global initialization routine.
+//!
+//! - [`OnceLock`]: Used for thread-safe, one-time initialization of a
//! global variable.
//!
//! - [`RwLock`]: Provides a mutual exclusion mechanism which allows
@@ -147,6 +149,7 @@
//! [`mpsc`]: crate::sync::mpsc
//! [`Mutex`]: crate::sync::Mutex
//! [`Once`]: crate::sync::Once
+//! [`OnceLock`]: crate::sync::OnceLock
//! [`RwLock`]: crate::sync::RwLock
#![stable(feature = "rust1", since = "1.0.0")]
@@ -172,9 +175,9 @@ pub use self::poison::{LockResult, PoisonError, TryLockError, TryLockResult};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
-#[unstable(feature = "once_cell", issue = "74465")]
+#[unstable(feature = "lazy_cell", issue = "109736")]
pub use self::lazy_lock::LazyLock;
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
pub use self::once_lock::OnceLock;
pub(crate) use self::remutex::{ReentrantMutex, ReentrantMutexGuard};
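
The split bullet reflects the division of labor: `Once` runs a one-time routine, while `OnceLock` also stores the produced value (stabilized in 1.70 per the hunk above). A minimal sketch of the `OnceLock` side:

```rust
use std::sync::OnceLock;

static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // The closure runs at most once, even under concurrent callers;
    // later calls return the same cached value.
    CONFIG.get_or_init(|| std::env::var("CONFIG").unwrap_or_default())
}
```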
diff --git a/library/std/src/sync/mpmc/array.rs b/library/std/src/sync/mpmc/array.rs
index c6bb09b04..492e21d9b 100644
--- a/library/std/src/sync/mpmc/array.rs
+++ b/library/std/src/sync/mpmc/array.rs
@@ -25,7 +25,8 @@ struct Slot<T> {
/// The current stamp.
stamp: AtomicUsize,
- /// The message in this slot.
+ /// The message in this slot. Either read out in `read` or dropped through
+ /// `discard_all_messages`.
msg: UnsafeCell<MaybeUninit<T>>,
}
@@ -439,14 +440,13 @@ impl<T> Channel<T> {
Some(self.cap)
}
- /// Disconnects the channel and wakes up all blocked senders and receivers.
+ /// Disconnects senders and wakes up all blocked receivers.
///
/// Returns `true` if this call disconnected the channel.
- pub(crate) fn disconnect(&self) -> bool {
+ pub(crate) fn disconnect_senders(&self) -> bool {
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
if tail & self.mark_bit == 0 {
- self.senders.disconnect();
self.receivers.disconnect();
true
} else {
@@ -454,6 +454,85 @@ impl<T> Channel<T> {
}
}
+ /// Disconnects receivers and wakes up all blocked senders.
+ ///
+ /// Returns `true` if this call disconnected the channel.
+ ///
+ /// # Safety
+ /// May only be called once upon dropping the last receiver. The
+ /// destruction of all other receivers must have been observed with acquire
+ /// ordering or stronger.
+ pub(crate) unsafe fn disconnect_receivers(&self) -> bool {
+ let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
+ let disconnected = if tail & self.mark_bit == 0 {
+ self.senders.disconnect();
+ true
+ } else {
+ false
+ };
+
+ self.discard_all_messages(tail);
+ disconnected
+ }
+
+ /// Discards all messages.
+ ///
+ /// `tail` should be the current (and therefore last) value of `tail`.
+ ///
+ /// # Panicking
+ /// If a destructor panics, the remaining messages are leaked, matching the
+ /// behaviour of the unbounded channel.
+ ///
+ /// # Safety
+ /// This method must only be called when dropping the last receiver. The
+ /// destruction of all other receivers must have been observed with acquire
+ /// ordering or stronger.
+ unsafe fn discard_all_messages(&self, tail: usize) {
+ debug_assert!(self.is_disconnected());
+
+ // Only receivers modify `head`, so since we are the last one,
+ // this value will not change and will not be observed (since
+ // no new messages can be sent after disconnection).
+ let mut head = self.head.load(Ordering::Relaxed);
+ let tail = tail & !self.mark_bit;
+
+ let backoff = Backoff::new();
+ loop {
+ // Deconstruct the head.
+ let index = head & (self.mark_bit - 1);
+ let lap = head & !(self.one_lap - 1);
+
+ // Inspect the corresponding slot.
+ debug_assert!(index < self.buffer.len());
+ let slot = unsafe { self.buffer.get_unchecked(index) };
+ let stamp = slot.stamp.load(Ordering::Acquire);
+
+ // If the stamp is ahead of the head by 1, we may drop the message.
+ if head + 1 == stamp {
+ head = if index + 1 < self.cap {
+ // Same lap, incremented index.
+ // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
+ head + 1
+ } else {
+ // One lap forward, index wraps around to zero.
+ // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
+ lap.wrapping_add(self.one_lap)
+ };
+
+ unsafe {
+ (*slot.msg.get()).assume_init_drop();
+ }
+ // If the tail equals the head, that means the channel is empty.
+ } else if tail == head {
+ return;
+ // Otherwise, a sender is about to write into the slot, so we need
+ // to wait for it to update the stamp.
+ } else {
+ backoff.spin_heavy();
+ }
+ }
+ }
+
/// Returns `true` if the channel is disconnected.
pub(crate) fn is_disconnected(&self) -> bool {
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
@@ -483,23 +562,3 @@ impl<T> Channel<T> {
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
}
}
-
-impl<T> Drop for Channel<T> {
- fn drop(&mut self) {
- // Get the index of the head.
- let hix = self.head.load(Ordering::Relaxed) & (self.mark_bit - 1);
-
- // Loop over all slots that hold a message and drop them.
- for i in 0..self.len() {
- // Compute the index of the next slot holding a message.
- let index = if hix + i < self.cap { hix + i } else { hix + i - self.cap };
-
- unsafe {
- debug_assert!(index < self.buffer.len());
- let slot = self.buffer.get_unchecked_mut(index);
- let msg = &mut *slot.msg.get();
- msg.as_mut_ptr().drop_in_place();
- }
- }
- }
-}
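
As an aside on the arithmetic above: `discard_all_messages` unpacks `head` into a lap count and a buffer index, with the mark bit sitting between them. Below is a back-of-the-envelope model of that packed-counter scheme; the constants are hypothetical reconstructions of what the channel's comments describe, not a verbatim excerpt of the std code:

```rust
fn main() {
    let cap: usize = 5;
    // Assumed layout: [lap bits][mark bit][index bits].
    let mark_bit: usize = (cap + 1).next_power_of_two(); // 8
    let one_lap: usize = mark_bit * 2; // 16

    let mut head: usize = 0;
    for _ in 0..12 {
        let index = head & (mark_bit - 1);
        let lap = head & !(one_lap - 1);
        println!("head={head:2} -> lap={} index={index}", lap / one_lap);

        head = if index + 1 < cap {
            head + 1 // same lap, next slot
        } else {
            lap.wrapping_add(one_lap) // next lap, index wraps to zero
        };
    }
}
```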
diff --git a/library/std/src/sync/mpmc/list.rs b/library/std/src/sync/mpmc/list.rs
index ec6c0726a..406a331a3 100644
--- a/library/std/src/sync/mpmc/list.rs
+++ b/library/std/src/sync/mpmc/list.rs
@@ -549,6 +549,18 @@ impl<T> Channel<T> {
let mut head = self.head.index.load(Ordering::Acquire);
let mut block = self.head.block.load(Ordering::Acquire);
+ // If we're going to be dropping messages, we need to synchronize with initialization.
+ if head >> SHIFT != tail >> SHIFT {
+ // The block can be null here only if a sender is in the process of initializing the
+ // channel while another sender managed to send a message by inserting it into the
+ // semi-initialized channel and advanced the tail.
+ // In that case, just wait until it gets initialized.
+ while block.is_null() {
+ backoff.spin_heavy();
+ block = self.head.block.load(Ordering::Acquire);
+ }
+ }
+
unsafe {
// Drop all messages between head and tail and deallocate the heap-allocated blocks.
while head >> SHIFT != tail >> SHIFT {
diff --git a/library/std/src/sync/mpmc/mod.rs b/library/std/src/sync/mpmc/mod.rs
index 7a602cecd..2068dda39 100644
--- a/library/std/src/sync/mpmc/mod.rs
+++ b/library/std/src/sync/mpmc/mod.rs
@@ -227,7 +227,7 @@ impl<T> Drop for Sender<T> {
fn drop(&mut self) {
unsafe {
match &self.flavor {
- SenderFlavor::Array(chan) => chan.release(|c| c.disconnect()),
+ SenderFlavor::Array(chan) => chan.release(|c| c.disconnect_senders()),
SenderFlavor::List(chan) => chan.release(|c| c.disconnect_senders()),
SenderFlavor::Zero(chan) => chan.release(|c| c.disconnect()),
}
@@ -403,7 +403,7 @@ impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
unsafe {
match &self.flavor {
- ReceiverFlavor::Array(chan) => chan.release(|c| c.disconnect()),
+ ReceiverFlavor::Array(chan) => chan.release(|c| c.disconnect_receivers()),
ReceiverFlavor::List(chan) => chan.release(|c| c.disconnect_receivers()),
ReceiverFlavor::Zero(chan) => chan.release(|c| c.disconnect()),
}
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index 9d2f92ffc..632709fd9 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -1,5 +1,6 @@
use super::*;
use crate::env;
+use crate::rc::Rc;
use crate::sync::mpmc::SendTimeoutError;
use crate::thread;
use crate::time::Duration;
@@ -656,3 +657,15 @@ fn issue_15761() {
repro()
}
}
+
+#[test]
+fn drop_unreceived() {
+ let (tx, rx) = sync_channel::<Rc<()>>(1);
+ let msg = Rc::new(());
+ let weak = Rc::downgrade(&msg);
+ assert!(tx.send(msg).is_ok());
+ drop(rx);
+ // Messages should be dropped immediately when the last receiver is destroyed.
+ assert!(weak.upgrade().is_none());
+ drop(tx);
+}
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index 065045f44..b8fec6902 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -107,8 +107,8 @@ use crate::sys::locks as sys;
/// *guard += 1;
/// ```
///
-/// It is sometimes necessary to manually drop the mutex guard to unlock it
-/// sooner than the end of the enclosing scope.
+/// To unlock a mutex guard sooner than the end of the enclosing scope,
+/// either create an inner scope or drop the guard manually.
///
/// ```
/// use std::sync::{Arc, Mutex};
@@ -125,11 +125,18 @@ use crate::sys::locks as sys;
/// let res_mutex_clone = Arc::clone(&res_mutex);
///
/// threads.push(thread::spawn(move || {
-/// let mut data = data_mutex_clone.lock().unwrap();
-/// // This is the result of some important and long-ish work.
-/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
-/// data.push(result);
-/// drop(data);
+/// // Here we use a block to limit the lifetime of the lock guard.
+/// let result = {
+/// let mut data = data_mutex_clone.lock().unwrap();
+/// // This is the result of some important and long-ish work.
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+/// result
+/// // The mutex guard gets dropped here, together with any other values
+/// // created in the critical section.
+/// };
+/// // The guard created here is a temporary dropped at the end of the statement, i.e.
+/// // the lock would not remain held even if the thread did some additional work.
/// *res_mutex_clone.lock().unwrap() += result;
/// }));
/// });
@@ -146,6 +153,8 @@ use crate::sys::locks as sys;
/// // It's even more important here than in the threads because we `.join` the
/// // threads after that. If we had not dropped the mutex guard, a thread could
/// // be waiting forever for it, causing a deadlock.
+/// // As in the threads, a block could have been used instead of calling the
+/// // `drop` function.
/// drop(data);
/// // Here the mutex guard is not assigned to a variable and so, even if the
/// // scope does not end after this line, the mutex is still released: there is
@@ -160,6 +169,7 @@ use crate::sys::locks as sys;
///
/// assert_eq!(*res_mutex.lock().unwrap(), 800);
/// ```
+///
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
pub struct Mutex<T: ?Sized> {
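
The two guard-scoping idioms the revised docs describe (inner scope vs. temporary dropped at the end of the statement) boil down to a minimal, self-contained sketch:

```rust
use std::sync::Mutex;

fn main() {
    let m = Mutex::new(0);

    // The guard returned by `lock` is a temporary here: it is dropped (and
    // the mutex unlocked) at the end of this statement.
    *m.lock().unwrap() += 1;

    // Alternatively, an inner scope bounds the guard's lifetime explicitly.
    let doubled = {
        let guard = m.lock().unwrap();
        *guard * 2
    }; // guard dropped here, lock released

    assert_eq!(doubled, 2);
}
```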
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index ed339ca5d..ab25a5bcc 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -14,8 +14,6 @@ use crate::sync::Once;
/// # Examples
///
/// ```
-/// #![feature(once_cell)]
-///
/// use std::sync::OnceLock;
///
/// static CELL: OnceLock<String> = OnceLock::new();
@@ -32,7 +30,7 @@ use crate::sync::Once;
/// assert!(value.is_some());
/// assert_eq!(value.unwrap().as_str(), "Hello, World!");
/// ```
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
pub struct OnceLock<T> {
once: Once,
// Whether or not the value is initialized is tracked by `once.is_completed()`.
@@ -40,8 +38,6 @@ pub struct OnceLock<T> {
/// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
///
/// ```compile_fail,E0597
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// struct A<'a>(&'a str);
@@ -63,7 +59,8 @@ impl<T> OnceLock<T> {
/// Creates a new empty cell.
#[inline]
#[must_use]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
+ #[rustc_const_stable(feature = "once_cell", since = "1.70.0")]
pub const fn new() -> OnceLock<T> {
OnceLock {
once: Once::new(),
@@ -77,7 +74,7 @@ impl<T> OnceLock<T> {
/// Returns `None` if the cell is empty, or being initialized. This
/// method never blocks.
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn get(&self) -> Option<&T> {
if self.is_initialized() {
// Safe b/c checked is_initialized
@@ -91,7 +88,7 @@ impl<T> OnceLock<T> {
///
/// Returns `None` if the cell is empty. This method never blocks.
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn get_mut(&mut self) -> Option<&mut T> {
if self.is_initialized() {
// Safe b/c checked is_initialized and we have a unique access
@@ -111,8 +108,6 @@ impl<T> OnceLock<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// static CELL: OnceLock<i32> = OnceLock::new();
@@ -129,7 +124,7 @@ impl<T> OnceLock<T> {
/// }
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn set(&self, value: T) -> Result<(), T> {
let mut value = Some(value);
self.get_or_init(|| value.take().unwrap());
@@ -158,8 +153,6 @@ impl<T> OnceLock<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// let cell = OnceLock::new();
@@ -169,7 +162,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(value, &92);
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn get_or_init<F>(&self, f: F) -> &T
where
F: FnOnce() -> T,
@@ -195,7 +188,7 @@ impl<T> OnceLock<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
+ /// #![feature(once_cell_try)]
///
/// use std::sync::OnceLock;
///
@@ -209,7 +202,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(cell.get(), Some(&92))
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[unstable(feature = "once_cell_try", issue = "109737")]
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
where
F: FnOnce() -> Result<T, E>,
@@ -236,8 +229,6 @@ impl<T> OnceLock<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// let cell: OnceLock<String> = OnceLock::new();
@@ -248,7 +239,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(cell.into_inner(), Some("hello".to_string()));
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn into_inner(mut self) -> Option<T> {
self.take()
}
@@ -262,8 +253,6 @@ impl<T> OnceLock<T> {
/// # Examples
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// let mut cell: OnceLock<String> = OnceLock::new();
@@ -275,7 +264,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(cell.get(), None);
/// ```
#[inline]
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[stable(feature = "once_cell", since = "1.70.0")]
pub fn take(&mut self) -> Option<T> {
if self.is_initialized() {
self.once = Once::new();
@@ -344,17 +333,17 @@ impl<T> OnceLock<T> {
// scoped thread B, which fills the cell, which is
// then destroyed by A. That is, destructor observes
// a sent value.
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<T: Send> Send for OnceLock<T> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceLock<T> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: UnwindSafe> UnwindSafe for OnceLock<T> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for OnceLock<T> {
/// Creates a new empty cell.
@@ -362,8 +351,6 @@ impl<T> const Default for OnceLock<T> {
/// # Example
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// fn main() {
@@ -376,7 +363,7 @@ impl<T> const Default for OnceLock<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.get() {
@@ -386,7 +373,7 @@ impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: Clone> Clone for OnceLock<T> {
#[inline]
fn clone(&self) -> OnceLock<T> {
@@ -401,15 +388,13 @@ impl<T: Clone> Clone for OnceLock<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T> From<T> for OnceLock<T> {
/// Create a new cell with its contents set to `value`.
///
/// # Example
///
/// ```
- /// #![feature(once_cell)]
- ///
/// use std::sync::OnceLock;
///
/// # fn main() -> Result<(), i32> {
@@ -430,7 +415,7 @@ impl<T> From<T> for OnceLock<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: PartialEq> PartialEq for OnceLock<T> {
#[inline]
fn eq(&self, other: &OnceLock<T>) -> bool {
@@ -438,10 +423,10 @@ impl<T: PartialEq> PartialEq for OnceLock<T> {
}
}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
impl<T: Eq> Eq for OnceLock<T> {}
-#[unstable(feature = "once_cell", issue = "74465")]
+#[stable(feature = "once_cell", since = "1.70.0")]
unsafe impl<#[may_dangle] T> Drop for OnceLock<T> {
#[inline]
fn drop(&mut self) {
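
With the attributes above flipped to stable, `OnceLock` works without any feature gate from Rust 1.70 on. A minimal usage sketch:

```rust
use std::sync::OnceLock;

static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // Initializes on first call; later calls return the cached value.
    CONFIG.get_or_init(|| "default".to_string())
}

fn main() {
    assert_eq!(config(), "default");
    // A second `set` fails because the cell is already initialized.
    assert!(CONFIG.set("other".to_string()).is_err());
}
```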
diff --git a/library/std/src/sync/remutex.rs b/library/std/src/sync/remutex.rs
index 4c054da64..519ec2c32 100644
--- a/library/std/src/sync/remutex.rs
+++ b/library/std/src/sync/remutex.rs
@@ -35,7 +35,7 @@ use crate::sys::locks as sys;
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
-/// since we're not dealing with multiple threads. If it compares unequal,
+/// since we're not dealing with multiple threads. If it's not equal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs
index 403a5e627..a5fcbdf39 100644
--- a/library/std/src/sys/common/alloc.rs
+++ b/library/std/src/sys/common/alloc.rs
@@ -22,6 +22,7 @@ pub const MIN_ALIGN: usize = 8;
#[cfg(any(
target_arch = "x86_64",
target_arch = "aarch64",
+ target_arch = "loongarch64",
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64",
diff --git a/library/std/src/sys/common/mod.rs b/library/std/src/sys/common/mod.rs
index 29fc0835d..2b8782ddf 100644
--- a/library/std/src/sys/common/mod.rs
+++ b/library/std/src/sys/common/mod.rs
@@ -12,6 +12,7 @@
pub mod alloc;
pub mod small_c_string;
+pub mod thread_local;
#[cfg(test)]
mod tests;
diff --git a/library/std/src/sys/common/thread_local/fast_local.rs b/library/std/src/sys/common/thread_local/fast_local.rs
new file mode 100644
index 000000000..e229eb16a
--- /dev/null
+++ b/library/std/src/sys/common/thread_local/fast_local.rs
@@ -0,0 +1,254 @@
+#[doc(hidden)]
+#[macro_export]
+#[allow_internal_unstable(
+ thread_local_internals,
+ cfg_target_thread_local,
+ thread_local,
+ libstd_thread_internals
+)]
+#[allow_internal_unsafe]
+macro_rules! __thread_local_inner {
+ // used to generate the `LocalKey` value for const-initialized thread locals
+ (@key $t:ty, const $init:expr) => {{
+ #[cfg_attr(not(bootstrap), inline)]
+ #[deny(unsafe_op_in_unsafe_fn)]
+ unsafe fn __getit(
+ _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ const INIT_EXPR: $t = $init;
+ // If the platform has support for `#[thread_local]`, use it.
+ #[thread_local]
+ static mut VAL: $t = INIT_EXPR;
+
+ // If a dtor isn't needed we can do something "very raw" and
+ // just get going.
+ if !$crate::mem::needs_drop::<$t>() {
+ unsafe {
+ return $crate::option::Option::Some(&VAL)
+ }
+ }
+
+ // 0 == dtor not registered
+ // 1 == dtor registered, dtor not run
+ // 2 == dtor registered and is running or has run
+ #[thread_local]
+ static mut STATE: $crate::primitive::u8 = 0;
+
+ unsafe extern "C" fn destroy(ptr: *mut $crate::primitive::u8) {
+ let ptr = ptr as *mut $t;
+
+ unsafe {
+ $crate::debug_assert_eq!(STATE, 1);
+ STATE = 2;
+ $crate::ptr::drop_in_place(ptr);
+ }
+ }
+
+ unsafe {
+ match STATE {
+ // 0 == we haven't registered a destructor, so do
+ // so now.
+ 0 => {
+ $crate::thread::__LocalKeyInner::<$t>::register_dtor(
+ $crate::ptr::addr_of_mut!(VAL) as *mut $crate::primitive::u8,
+ destroy,
+ );
+ STATE = 1;
+ $crate::option::Option::Some(&VAL)
+ }
+ // 1 == the destructor is registered and the value
+ // is valid, so return the pointer.
+ 1 => $crate::option::Option::Some(&VAL),
+ // otherwise the destructor has already run, so we
+ // can't give access.
+ _ => $crate::option::Option::None,
+ }
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }};
+
+ // used to generate the `LocalKey` value for `thread_local!`
+ (@key $t:ty, $init:expr) => {
+ {
+ #[inline]
+ fn __init() -> $t { $init }
+
+ #[cfg_attr(not(bootstrap), inline)]
+ unsafe fn __getit(
+ init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ #[thread_local]
+ static __KEY: $crate::thread::__LocalKeyInner<$t> =
+ $crate::thread::__LocalKeyInner::<$t>::new();
+
+ // FIXME: remove the #[allow(...)] marker when macros don't
+ // raise warning for missing/extraneous unsafe blocks anymore.
+ // See https://github.com/rust-lang/rust/issues/74838.
+ #[allow(unused_unsafe)]
+ unsafe {
+ __KEY.get(move || {
+ if let $crate::option::Option::Some(init) = init {
+ if let $crate::option::Option::Some(value) = init.take() {
+ return value;
+ } else if $crate::cfg!(debug_assertions) {
+ $crate::unreachable!("missing default value");
+ }
+ }
+ __init()
+ })
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }
+ };
+ ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $($init:tt)*) => {
+ $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
+ $crate::__thread_local_inner!(@key $t, $($init)*);
+ }
+}
+
+#[doc(hidden)]
+pub mod fast {
+ use super::super::lazy::LazyKeyInner;
+ use crate::cell::Cell;
+ use crate::sys::thread_local_dtor::register_dtor;
+ use crate::{fmt, mem, panic};
+
+ #[derive(Copy, Clone)]
+ enum DtorState {
+ Unregistered,
+ Registered,
+ RunningOrHasRun,
+ }
+
+ // This data structure has been carefully constructed so that the fast path
+ // only contains one branch on x86. That optimization is necessary to avoid
+ // duplicated tls lookups on OSX.
+ //
+ // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
+ pub struct Key<T> {
+ // If `LazyKeyInner::get` returns `None`, that indicates either:
+ // * The value has never been initialized
+ // * The value is being recursively initialized
+ // * The value has already been destroyed or is being destroyed
+ // To determine which kind of `None`, check `dtor_state`.
+ //
+ // This is very optimizer-friendly for the fast path: initialized but
+ // not yet dropped.
+ inner: LazyKeyInner<T>,
+
+ // Metadata to keep track of the state of the destructor. Remember that
+ // this variable is thread-local, not global.
+ dtor_state: Cell<DtorState>,
+ }
+
+ impl<T> fmt::Debug for Key<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Key").finish_non_exhaustive()
+ }
+ }
+
+ impl<T> Key<T> {
+ pub const fn new() -> Key<T> {
+ Key { inner: LazyKeyInner::new(), dtor_state: Cell::new(DtorState::Unregistered) }
+ }
+
+ // Note that this is a publicly-callable function only for the
+ // const-initialized form of thread locals, basically a way to call the
+ // free `register_dtor` function defined elsewhere in std.
+ pub unsafe fn register_dtor(a: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
+ unsafe {
+ register_dtor(a, dtor);
+ }
+ }
+
+ pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
+ // SAFETY: See the definitions of `LazyKeyInner::get` and
+ // `try_initialize` for more information.
+ //
+ // The caller must ensure no mutable references are ever active to
+ // the inner cell or the inner T when this is called.
+ // The `try_initialize` is dependent on the passed `init` function
+ // for this.
+ unsafe {
+ match self.inner.get() {
+ Some(val) => Some(val),
+ None => self.try_initialize(init),
+ }
+ }
+ }
+
+ // `try_initialize` is only called once per fast thread local variable,
+ // except in corner cases where thread_local dtors reference other
+ // thread_locals, or it is being recursively initialized.
+ //
+ // macOS: Inlining this function can cause two `tlv_get_addr` calls to
+ // be performed for every call to `Key::get`.
+ // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
+ #[inline(never)]
+ unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
+ // SAFETY: See comment above (this function doc).
+ if !mem::needs_drop::<T>() || unsafe { self.try_register_dtor() } {
+ // SAFETY: See comment above (this function doc).
+ Some(unsafe { self.inner.initialize(init) })
+ } else {
+ None
+ }
+ }
+
+ // `try_register_dtor` is only called once per fast thread local
+ // variable, except in corner cases where thread_local dtors reference
+ // other thread_locals, or it is being recursively initialized.
+ unsafe fn try_register_dtor(&self) -> bool {
+ match self.dtor_state.get() {
+ DtorState::Unregistered => {
+ // SAFETY: dtor registration happens before initialization.
+ // Passing `self` as a pointer while using `destroy_value<T>`
+ // is safe because the function will build a pointer to a
+ // Key<T>, which is the type of self and so finds the correct
+ // size.
+ unsafe { register_dtor(self as *const _ as *mut u8, destroy_value::<T>) };
+ self.dtor_state.set(DtorState::Registered);
+ true
+ }
+ DtorState::Registered => {
+ // recursively initialized
+ true
+ }
+ DtorState::RunningOrHasRun => false,
+ }
+ }
+ }
+
+ unsafe extern "C" fn destroy_value<T>(ptr: *mut u8) {
+ let ptr = ptr as *mut Key<T>;
+
+ // SAFETY:
+ //
+ // The pointer `ptr` has been built just above and comes from
+ // `try_register_dtor` where it is originally a Key<T> coming from `self`,
+ // making it non-null and of the correct type.
+ //
+ // Right before we run the user destructor be sure to set the
+ // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
+ // causes future calls to `get` to run `try_initialize` again,
+ // which will now fail, and return `None`.
+ //
+ // Wrap the call in a catch to ensure unwinding is caught in the event
+ // a panic takes place in a destructor.
+ if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| unsafe {
+ let value = (*ptr).inner.take();
+ (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
+ drop(value);
+ })) {
+ rtabort!("thread local panicked on drop");
+ }
+ }
+}
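
From the user side, the const arm of this macro is reached through `thread_local!` with a `const` initializer. A small sketch; note that per the `needs_drop` check above, no destructor is registered for a non-`Drop` type like `Cell<u32>`:

```rust
use std::cell::Cell;

thread_local! {
    // Const-initialized: takes the fast path described above, and no
    // destructor is registered because `Cell<u32>` never needs dropping.
    static COUNTER: Cell<u32> = const { Cell::new(0) };
}

fn main() {
    COUNTER.with(|c| c.set(c.get() + 1));
    assert_eq!(COUNTER.with(|c| c.get()), 1);

    // Each thread gets its own slot, starting from the const initializer.
    std::thread::spawn(|| {
        assert_eq!(COUNTER.with(|c| c.get()), 0);
    })
    .join()
    .unwrap();
}
```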
diff --git a/library/std/src/sys/common/thread_local/mod.rs b/library/std/src/sys/common/thread_local/mod.rs
new file mode 100644
index 000000000..1fee84a04
--- /dev/null
+++ b/library/std/src/sys/common/thread_local/mod.rs
@@ -0,0 +1,109 @@
+//! The following module declarations are outside cfg_if because the internal
+//! `__thread_local_inner` macro does not seem to be exported properly when using cfg_if.
+#![unstable(feature = "thread_local_internals", reason = "should not be necessary", issue = "none")]
+
+#[cfg(all(target_thread_local, not(all(target_family = "wasm", not(target_feature = "atomics")))))]
+mod fast_local;
+#[cfg(all(
+ not(target_thread_local),
+ not(all(target_family = "wasm", not(target_feature = "atomics")))
+))]
+mod os_local;
+#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
+mod static_local;
+
+#[cfg(not(test))]
+cfg_if::cfg_if! {
+ if #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))] {
+ #[doc(hidden)]
+ pub use static_local::statik::Key;
+ } else if #[cfg(all(target_thread_local, not(all(target_family = "wasm", not(target_feature = "atomics")))))] {
+ #[doc(hidden)]
+ pub use fast_local::fast::Key;
+ } else if #[cfg(all(not(target_thread_local), not(all(target_family = "wasm", not(target_feature = "atomics")))))] {
+ #[doc(hidden)]
+ pub use os_local::os::Key;
+ }
+}
+
+#[doc(hidden)]
+#[cfg(test)]
+pub use realstd::thread::__LocalKeyInner as Key;
+
+mod lazy {
+ use crate::cell::UnsafeCell;
+ use crate::hint;
+ use crate::mem;
+
+ pub struct LazyKeyInner<T> {
+ inner: UnsafeCell<Option<T>>,
+ }
+
+ impl<T> LazyKeyInner<T> {
+ pub const fn new() -> LazyKeyInner<T> {
+ LazyKeyInner { inner: UnsafeCell::new(None) }
+ }
+
+ pub unsafe fn get(&self) -> Option<&'static T> {
+ // SAFETY: The caller must ensure no reference is ever handed out to
+ // the inner cell nor mutable reference to the Option<T> inside said
+ // cell. This makes it safe to hand out a reference, though the lifetime
+ // of 'static is itself unsafe, making the get method unsafe.
+ unsafe { (*self.inner.get()).as_ref() }
+ }
+
+ /// The caller must ensure that no reference is active: this method
+ /// needs unique access.
+ pub unsafe fn initialize<F: FnOnce() -> T>(&self, init: F) -> &'static T {
+ // Execute the initialization up front, *then* move it into our slot,
+ // just in case initialization fails.
+ let value = init();
+ let ptr = self.inner.get();
+
+ // SAFETY:
+ //
+ // note that this can in theory just be `*ptr = Some(value)`, but
+ // the compiler will currently codegen that pattern with something like:
+ //
+ // ptr::drop_in_place(ptr)
+ // ptr::write(ptr, Some(value))
+ //
+ // Due to this pattern it's possible for the destructor of the value in
+ // `ptr` (e.g., if this is being recursively initialized) to re-access
+ // TLS, in which case there will be a `&` and `&mut` pointer to the same
+ // value (an aliasing violation). To avoid setting the "I'm running a
+ // destructor" flag we just use `mem::replace` which should sequence the
+ // operations a little differently and make this safe to call.
+ //
+ // The precondition also ensures that we are the only one accessing
+ // `self` at the moment so replacing is fine.
+ unsafe {
+ let _ = mem::replace(&mut *ptr, Some(value));
+ }
+
+ // SAFETY: With the call to `mem::replace` it is guaranteed there is
+ // a `Some` behind `ptr`, not a `None` so `unreachable_unchecked`
+ // will never be reached.
+ unsafe {
+ // After storing `Some` we want to get a reference to the contents of
+ // what we just stored. While we could use `unwrap` here and it should
+ // always work it empirically doesn't seem to always get optimized away,
+ // which means that using something like `try_with` can pull in
+ // panicking code and cause a large size bloat.
+ match *ptr {
+ Some(ref x) => x,
+ None => hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ /// The other methods hand out references while taking &self.
+ /// As such, callers of this method must ensure no `&` and `&mut` are
+ /// available and used at the same time.
+ #[allow(unused)]
+ pub unsafe fn take(&mut self) -> Option<T> {
+ // SAFETY: See doc comment for this method.
+ unsafe { (*self.inner.get()).take() }
+ }
+ }
+}
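
The `mem::replace` reasoning in `initialize` can be illustrated in safe code: replacement moves the old value out and defers its destructor until after the new value is stored, which is exactly the ordering the comment relies on. A minimal illustration, not the TLS code itself:

```rust
use std::mem;

fn main() {
    let mut slot: Option<String> = Some("old".to_string());

    // `*slot = Some(new)` would drop the old value in place before writing.
    // `mem::replace` instead moves the old value out and hands it back, so
    // its destructor runs only after the new value is already stored.
    let previous = mem::replace(&mut slot, Some("new".to_string()));

    assert_eq!(previous.as_deref(), Some("old"));
    assert_eq!(slot.as_deref(), Some("new"));
    // `previous` is dropped here, after the store.
}
```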
diff --git a/library/std/src/sys/common/thread_local/os_local.rs b/library/std/src/sys/common/thread_local/os_local.rs
new file mode 100644
index 000000000..1442a397e
--- /dev/null
+++ b/library/std/src/sys/common/thread_local/os_local.rs
@@ -0,0 +1,197 @@
+#[doc(hidden)]
+#[macro_export]
+#[allow_internal_unstable(
+ thread_local_internals,
+ cfg_target_thread_local,
+ thread_local,
+ libstd_thread_internals
+)]
+#[allow_internal_unsafe]
+macro_rules! __thread_local_inner {
+ // used to generate the `LocalKey` value for const-initialized thread locals
+ (@key $t:ty, const $init:expr) => {{
+ #[cfg_attr(not(bootstrap), inline)]
+ #[deny(unsafe_op_in_unsafe_fn)]
+ unsafe fn __getit(
+ _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ const INIT_EXPR: $t = $init;
+
+ // On platforms without `#[thread_local]` we fall back to the
+ // same implementation as below for os thread locals.
+ #[inline]
+ const fn __init() -> $t { INIT_EXPR }
+ static __KEY: $crate::thread::__LocalKeyInner<$t> =
+ $crate::thread::__LocalKeyInner::new();
+ #[allow(unused_unsafe)]
+ unsafe {
+ __KEY.get(move || {
+ if let $crate::option::Option::Some(init) = _init {
+ if let $crate::option::Option::Some(value) = init.take() {
+ return value;
+ } else if $crate::cfg!(debug_assertions) {
+ $crate::unreachable!("missing initial value");
+ }
+ }
+ __init()
+ })
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }};
+
+ // used to generate the `LocalKey` value for `thread_local!`
+ (@key $t:ty, $init:expr) => {
+ {
+ #[inline]
+ fn __init() -> $t { $init }
+
+ // `#[inline]` does not work on windows-gnu due to linking errors around dllimports.
+ // See https://github.com/rust-lang/rust/issues/109797.
+ #[cfg_attr(not(windows), inline)]
+ unsafe fn __getit(
+ init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ static __KEY: $crate::thread::__LocalKeyInner<$t> =
+ $crate::thread::__LocalKeyInner::new();
+
+ // FIXME: remove the #[allow(...)] marker when macros don't
+ // raise warning for missing/extraneous unsafe blocks anymore.
+ // See https://github.com/rust-lang/rust/issues/74838.
+ #[allow(unused_unsafe)]
+ unsafe {
+ __KEY.get(move || {
+ if let $crate::option::Option::Some(init) = init {
+ if let $crate::option::Option::Some(value) = init.take() {
+ return value;
+ } else if $crate::cfg!(debug_assertions) {
+ $crate::unreachable!("missing default value");
+ }
+ }
+ __init()
+ })
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }
+ };
+ ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $($init:tt)*) => {
+ $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
+ $crate::__thread_local_inner!(@key $t, $($init)*);
+ }
+}
+
+#[doc(hidden)]
+pub mod os {
+ use super::super::lazy::LazyKeyInner;
+ use crate::cell::Cell;
+ use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;
+ use crate::{fmt, marker, panic, ptr};
+
+ /// Use a regular global static to store this key; the state provided will then be
+ /// thread-local.
+ pub struct Key<T> {
+ // OS-TLS key that we'll use to key off.
+ os: OsStaticKey,
+ marker: marker::PhantomData<Cell<T>>,
+ }
+
+ impl<T> fmt::Debug for Key<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Key").finish_non_exhaustive()
+ }
+ }
+
+ unsafe impl<T> Sync for Key<T> {}
+
+ struct Value<T: 'static> {
+ inner: LazyKeyInner<T>,
+ key: &'static Key<T>,
+ }
+
+ impl<T: 'static> Key<T> {
+ #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
+ pub const fn new() -> Key<T> {
+ Key { os: OsStaticKey::new(Some(destroy_value::<T>)), marker: marker::PhantomData }
+ }
+
+ /// It is a requirement for the caller to ensure that no mutable
+ /// reference is active when this method is called.
+ pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
+ // SAFETY: See the documentation for this method.
+ let ptr = unsafe { self.os.get() as *mut Value<T> };
+ if ptr.addr() > 1 {
+ // SAFETY: the check ensured the pointer is safe (its destructor
+ // is not running) + it is coming from a trusted source (self).
+ if let Some(ref value) = unsafe { (*ptr).inner.get() } {
+ return Some(value);
+ }
+ }
+ // SAFETY: At this point we are sure we have no value and so
+ // initializing (or trying to) is safe.
+ unsafe { self.try_initialize(init) }
+ }
+
+ // `try_initialize` is only called once per os thread local variable,
+ // except in corner cases where thread_local dtors reference other
+ // thread_local's, or it is being recursively initialized.
+ unsafe fn try_initialize(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
+ // SAFETY: No mutable references are ever handed out meaning getting
+ // the value is ok.
+ let ptr = unsafe { self.os.get() as *mut Value<T> };
+ if ptr.addr() == 1 {
+ // destructor is running
+ return None;
+ }
+
+ let ptr = if ptr.is_null() {
+ // If the lookup returned null, we haven't initialized our own
+ // local copy, so do that now.
+ let ptr = Box::into_raw(Box::new(Value { inner: LazyKeyInner::new(), key: self }));
+ // SAFETY: At this point we are sure there is no value inside
+ // ptr so setting it will not affect anyone else.
+ unsafe {
+ self.os.set(ptr as *mut u8);
+ }
+ ptr
+ } else {
+ // recursive initialization
+ ptr
+ };
+
+ // SAFETY: ptr has been ensured to be non-null just above and so can be
+ // dereferenced safely.
+ unsafe { Some((*ptr).inner.initialize(init)) }
+ }
+ }
+
+ unsafe extern "C" fn destroy_value<T: 'static>(ptr: *mut u8) {
+ // SAFETY:
+ //
+ // The OS TLS ensures that this key contains a null value when this
+ // destructor starts to run. We set it back to a sentinel value of 1 to
+ // ensure that any future calls to `get` for this thread will return
+ // `None`.
+ //
+ // Note that to prevent an infinite loop we reset it back to null right
+ // before we return from the destructor ourselves.
+ //
+ // Wrap the call in a catch to ensure unwinding is caught in the event
+ // a panic takes place in a destructor.
+ if let Err(_) = panic::catch_unwind(|| unsafe {
+ let ptr = Box::from_raw(ptr as *mut Value<T>);
+ let key = ptr.key;
+ key.os.set(ptr::invalid_mut(1));
+ drop(ptr);
+ key.os.set(ptr::null_mut());
+ }) {
+ rtabort!("thread local panicked on drop");
+ }
+ }
+}
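
The OS-TLS slot here follows a three-state protocol: null (never initialized), address 1 (destructor running), anything else (a live boxed value). A toy model of that classification, using plain integer casts where the code above uses the unstable strict-provenance helpers:

```rust
fn classify(slot: *mut u8) -> &'static str {
    match slot as usize {
        0 => "uninitialized",
        1 => "destructor running: get() must return None",
        _ => "live value",
    }
}

fn main() {
    assert_eq!(classify(std::ptr::null_mut()), "uninitialized");
    assert_eq!(
        classify(1 as *mut u8),
        "destructor running: get() must return None"
    );
    let mut x = 0u8;
    assert_eq!(classify(&mut x), "live value");
}
```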
diff --git a/library/std/src/sys/common/thread_local/static_local.rs b/library/std/src/sys/common/thread_local/static_local.rs
new file mode 100644
index 000000000..ec4f2a12b
--- /dev/null
+++ b/library/std/src/sys/common/thread_local/static_local.rs
@@ -0,0 +1,115 @@
+#[doc(hidden)]
+#[macro_export]
+#[allow_internal_unstable(
+ thread_local_internals,
+ cfg_target_thread_local,
+ thread_local,
+ libstd_thread_internals
+)]
+#[allow_internal_unsafe]
+macro_rules! __thread_local_inner {
+ // used to generate the `LocalKey` value for const-initialized thread locals
+ (@key $t:ty, const $init:expr) => {{
+ #[inline] // see comments below
+ #[deny(unsafe_op_in_unsafe_fn)]
+ unsafe fn __getit(
+ _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ const INIT_EXPR: $t = $init;
+
+ // wasm without atomics maps directly to `static mut`, and dtors
+ // aren't implemented because thread dtors aren't really a thing
+ // on wasm right now
+ //
+ // FIXME(#84224) this should come after the `target_thread_local`
+ // block.
+ static mut VAL: $t = INIT_EXPR;
+ unsafe { $crate::option::Option::Some(&VAL) }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }};
+
+ // used to generate the `LocalKey` value for `thread_local!`
+ (@key $t:ty, $init:expr) => {
+ {
+ #[inline]
+ fn __init() -> $t { $init }
+ #[inline]
+ unsafe fn __getit(
+ init: $crate::option::Option<&mut $crate::option::Option<$t>>,
+ ) -> $crate::option::Option<&'static $t> {
+ static __KEY: $crate::thread::__LocalKeyInner<$t> =
+ $crate::thread::__LocalKeyInner::new();
+
+ // FIXME: remove the #[allow(...)] marker when macros don't
+ // raise warning for missing/extraneous unsafe blocks anymore.
+ // See https://github.com/rust-lang/rust/issues/74838.
+ #[allow(unused_unsafe)]
+ unsafe {
+ __KEY.get(move || {
+ if let $crate::option::Option::Some(init) = init {
+ if let $crate::option::Option::Some(value) = init.take() {
+ return value;
+ } else if $crate::cfg!(debug_assertions) {
+ $crate::unreachable!("missing default value");
+ }
+ }
+ __init()
+ })
+ }
+ }
+
+ unsafe {
+ $crate::thread::LocalKey::new(__getit)
+ }
+ }
+ };
+ ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $($init:tt)*) => {
+ $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
+ $crate::__thread_local_inner!(@key $t, $($init)*);
+ }
+}
+
+/// On some targets like wasm there are no threads, so there is no need to
+/// generate thread locals; we can instead just use plain statics!
+#[doc(hidden)]
+pub mod statik {
+ use super::super::lazy::LazyKeyInner;
+ use crate::fmt;
+
+ pub struct Key<T> {
+ inner: LazyKeyInner<T>,
+ }
+
+ unsafe impl<T> Sync for Key<T> {}
+
+ impl<T> fmt::Debug for Key<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Key").finish_non_exhaustive()
+ }
+ }
+
+ impl<T> Key<T> {
+ pub const fn new() -> Key<T> {
+ Key { inner: LazyKeyInner::new() }
+ }
+
+ pub unsafe fn get(&self, init: impl FnOnce() -> T) -> Option<&'static T> {
+ // SAFETY: The caller must ensure no reference is ever handed out to
+ // the inner cell nor mutable reference to the Option<T> inside said
+ // cell. This makes it safe to hand out a reference, though the lifetime
+ // of 'static is itself unsafe, making the get method unsafe.
+ let value = unsafe {
+ match self.inner.get() {
+ Some(ref value) => value,
+ None => self.inner.initialize(init),
+ }
+ };
+
+ Some(value)
+ }
+ }
+}
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
index c966f2177..cf0b27176 100644
--- a/library/std/src/sys/hermit/fs.rs
+++ b/library/std/src/sys/hermit/fs.rs
@@ -202,7 +202,7 @@ impl OpenOptions {
create: false,
create_new: false,
// system-specific
- mode: 0x777,
+ mode: 0o777,
}
}
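
The one-character fix above swaps a hexadecimal literal for the intended octal permission mask. A quick check of why the distinction matters:

```rust
fn main() {
    // The typo: 0x777 is a hexadecimal literal, not Unix permission bits.
    assert_eq!(0x777, 1911); // 7*256 + 7*16 + 7
    // The fix: 0o777 is octal, i.e. rwxrwxrwx.
    assert_eq!(0o777, 511);
    assert_ne!(0x777, 0o777);
}
```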
diff --git a/library/std/src/sys/hermit/futex.rs b/library/std/src/sys/hermit/futex.rs
index b64c174b0..427d8ff6f 100644
--- a/library/std/src/sys/hermit/futex.rs
+++ b/library/std/src/sys/hermit/futex.rs
@@ -16,7 +16,7 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
let r = unsafe {
abi::futex_wait(
- futex.as_mut_ptr(),
+ futex.as_ptr(),
expected,
timespec.as_ref().map_or(null(), |t| t as *const abi::timespec),
abi::FUTEX_RELATIVE_TIMEOUT,
@@ -28,12 +28,12 @@ pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -
#[inline]
pub fn futex_wake(futex: &AtomicU32) -> bool {
- unsafe { abi::futex_wake(futex.as_mut_ptr(), 1) > 0 }
+ unsafe { abi::futex_wake(futex.as_ptr(), 1) > 0 }
}
#[inline]
pub fn futex_wake_all(futex: &AtomicU32) {
unsafe {
- abi::futex_wake(futex.as_mut_ptr(), i32::MAX);
+ abi::futex_wake(futex.as_ptr(), i32::MAX);
}
}
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
index d34a4cfed..c7cb84667 100644
--- a/library/std/src/sys/hermit/mod.rs
+++ b/library/std/src/sys/hermit/mod.rs
@@ -15,7 +15,6 @@
#![allow(missing_docs, nonstandard_style, unsafe_op_in_unsafe_fn)]
-use crate::intrinsics;
use crate::os::raw::c_char;
pub mod alloc;
@@ -76,9 +75,18 @@ pub fn abort_internal() -> ! {
}
}
-// FIXME: just a workaround to test the system
pub fn hashmap_random_keys() -> (u64, u64) {
- (1, 2)
+ let mut buf = [0; 16];
+ let mut slice = &mut buf[..];
+ while !slice.is_empty() {
+ let res = cvt(unsafe { abi::read_entropy(slice.as_mut_ptr(), slice.len(), 0) })
+ .expect("failed to generate random hashmap keys");
+ slice = &mut slice[res as usize..];
+ }
+
+ let key1 = buf[..8].try_into().unwrap();
+ let key2 = buf[8..].try_into().unwrap();
+ (u64::from_ne_bytes(key1), u64::from_ne_bytes(key2))
}
// This function is needed by the panic runtime. The symbol is named in
diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs
index 5fb6281aa..d6f64a297 100644
--- a/library/std/src/sys/hermit/net.rs
+++ b/library/std/src/sys/hermit/net.rs
@@ -1,7 +1,7 @@
#![allow(dead_code)]
use crate::cmp;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedBuf, BorrowedCursor, IoSlice, IoSliceMut};
use crate::mem;
use crate::net::{Shutdown, SocketAddr};
use crate::os::hermit::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, RawFd};
@@ -146,18 +146,35 @@ impl Socket {
Ok(Socket(unsafe { FileDesc::from_raw_fd(fd) }))
}
- fn recv_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<usize> {
- let ret =
- cvt(unsafe { netc::recv(self.0.as_raw_fd(), buf.as_mut_ptr(), buf.len(), flags) })?;
- Ok(ret as usize)
+ fn recv_with_flags(&self, mut buf: BorrowedCursor<'_>, flags: i32) -> io::Result<()> {
+ let ret = cvt(unsafe {
+ netc::recv(
+ self.0.as_raw_fd(),
+ buf.as_mut().as_mut_ptr() as *mut u8,
+ buf.capacity(),
+ flags,
+ )
+ })?;
+ unsafe {
+ buf.advance(ret as usize);
+ }
+ Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, 0)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), 0)?;
+ Ok(buf.len())
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, netc::MSG_PEEK)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), netc::MSG_PEEK)?;
+ Ok(buf.len())
+ }
+
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.recv_with_flags(buf, 0)
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
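
The same `BorrowedBuf`/`BorrowedCursor` pattern recurs in the sgx, solid, and unix sockets below: the flag-taking worker fills a cursor, and the `&mut [u8]`-based wrappers adapt via `BorrowedBuf::from`. A user-level sketch of the pattern; this is nightly-only, and `read_buf` is the feature gate believed to cover these APIs around the time of this change:

```rust
#![feature(read_buf)]

use std::io::{BorrowedBuf, Read, Result};

// The wrapper adapts a plain `&mut [u8]` into a `BorrowedBuf`, hands the
// unfilled cursor to the reader, and reports how many bytes were filled.
fn read_into<R: Read>(r: &mut R, out: &mut [u8]) -> Result<usize> {
    let mut buf = BorrowedBuf::from(out);
    r.read_buf(buf.unfilled())?; // the cursor tracks how much was written
    Ok(buf.len()) // number of initialized-and-filled bytes
}

fn main() -> Result<()> {
    let mut src: &[u8] = b"hello";
    let mut out = [0u8; 8];
    let n = read_into(&mut src, &mut out)?;
    assert_eq!(&out[..n], b"hello");
    Ok(())
}
```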
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index c080c176a..e767b2866 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -76,3 +76,12 @@ cfg_if::cfg_if! {
pub mod c;
}
}
+
+cfg_if::cfg_if! {
+ // Fuchsia components default to full backtrace.
+ if #[cfg(target_os = "fuchsia")] {
+ pub const FULL_BACKTRACE_DEFAULT: bool = true;
+ } else {
+ pub const FULL_BACKTRACE_DEFAULT: bool = false;
+ }
+}
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index 0d934318c..01505e944 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -3,7 +3,6 @@
use crate::arch::asm;
use crate::cell::UnsafeCell;
use crate::cmp;
-use crate::convert::TryInto;
use crate::mem;
use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut};
use crate::ptr::{self, NonNull};
diff --git a/library/std/src/sys/sgx/fd.rs b/library/std/src/sys/sgx/fd.rs
index e5dc5b5ad..0c02a1076 100644
--- a/library/std/src/sys/sgx/fd.rs
+++ b/library/std/src/sys/sgx/fd.rs
@@ -1,7 +1,7 @@
use fortanix_sgx_abi::Fd;
use super::abi::usercalls;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::mem;
use crate::sys::{AsInner, FromInner, IntoInner};
@@ -30,6 +30,10 @@ impl FileDesc {
usercalls::read(self.fd, &mut [IoSliceMut::new(buf)])
}
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|b| self.read(b), buf)
+ }
+
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
usercalls::read(self.fd, bufs)
}
diff --git a/library/std/src/sys/sgx/net.rs b/library/std/src/sys/sgx/net.rs
index 4c4cd7d1d..923be5eb9 100644
--- a/library/std/src/sys/sgx/net.rs
+++ b/library/std/src/sys/sgx/net.rs
@@ -1,6 +1,6 @@
use crate::error;
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr, ToSocketAddrs};
use crate::sync::Arc;
use crate::sys::fd::FileDesc;
@@ -144,6 +144,10 @@ impl TcpStream {
self.inner.inner.read(buf)
}
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.inner.read_buf(buf)
+ }
+
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.inner.read_vectored(bufs)
}
diff --git a/library/std/src/sys/solid/net.rs b/library/std/src/sys/solid/net.rs
index 1b98ef993..7d7bfae14 100644
--- a/library/std/src/sys/solid/net.rs
+++ b/library/std/src/sys/solid/net.rs
@@ -2,7 +2,7 @@ use super::abi;
use crate::{
cmp,
ffi::CStr,
- io::{self, ErrorKind, IoSlice, IoSliceMut},
+ io::{self, BorrowedBuf, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut},
mem,
net::{Shutdown, SocketAddr},
ptr, str,
@@ -294,19 +294,30 @@ impl Socket {
self.0.duplicate().map(Socket)
}
- fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ fn recv_with_flags(&self, mut buf: BorrowedCursor<'_>, flags: c_int) -> io::Result<()> {
let ret = cvt(unsafe {
- netc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
+ netc::recv(self.0.raw(), buf.as_mut().as_mut_ptr().cast(), buf.capacity(), flags)
})?;
- Ok(ret as usize)
+ unsafe {
+ buf.advance(ret as usize);
+ }
+ Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, 0)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), 0)?;
+ Ok(buf.len())
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, MSG_PEEK)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), MSG_PEEK)?;
+ Ok(buf.len())
+ }
+
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.recv_with_flags(buf, 0)
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index 9874af4d3..ce5c048f2 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -469,6 +469,15 @@ impl<'a> Read for &'a FileDesc {
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
(**self).read_buf(cursor)
}
+
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ (**self).read_vectored(bufs)
+ }
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ (**self).is_read_vectored()
+ }
}
impl AsInner<OwnedFd> for FileDesc {
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index 7566fafda..abef170dd 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -34,7 +34,7 @@ use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
target_os = "watchos",
))]
use crate::sys::weak::syscall;
-#[cfg(any(target_os = "android", target_os = "macos"))]
+#[cfg(any(target_os = "android", target_os = "macos", target_os = "solaris"))]
use crate::sys::weak::weak;
use libc::{c_int, mode_t};
@@ -43,6 +43,7 @@ use libc::{c_int, mode_t};
target_os = "macos",
target_os = "ios",
target_os = "watchos",
+ target_os = "solaris",
all(target_os = "linux", target_env = "gnu")
))]
use libc::c_char;
@@ -1497,8 +1498,8 @@ pub fn link(original: &Path, link: &Path) -> io::Result<()> {
// Android has `linkat` on newer versions, but we happen to know `link`
// always has the correct behavior, so it's here as well.
cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
- } else if #[cfg(target_os = "macos")] {
- // On MacOS, older versions (<=10.9) lack support for linkat while newer
+ } else if #[cfg(any(target_os = "macos", target_os = "solaris"))] {
+ // MacOS (<=10.9) and Solaris 10 lack support for linkat while newer
// versions have it. We want to use linkat if it is available, so we use weak!
// to check. `linkat` is preferable to `link` because it gives us a flag to
// specify how symlinks should be handled. We pass 0 as the flags argument,
@@ -1892,7 +1893,7 @@ mod remove_dir_impl {
// file descriptor is automatically closed by libc::closedir() now, so give up ownership
let new_parent_fd = dir_fd.into_raw_fd();
// a valid root is not needed because we do not call any functions involving the full path
- // of the DirEntrys.
+ // of the `DirEntry`s.
let dummy_root = PathBuf::new();
let inner = InnerReadDir { dirp, root: dummy_root };
Ok((ReadDir::new(inner), new_parent_fd))
diff --git a/library/std/src/sys/unix/futex.rs b/library/std/src/sys/unix/futex.rs
index 8d5b54021..d310be6c7 100644
--- a/library/std/src/sys/unix/futex.rs
+++ b/library/std/src/sys/unix/futex.rs
@@ -273,8 +273,6 @@ pub mod zircon {
#[cfg(target_os = "fuchsia")]
pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
- use crate::convert::TryFrom;
-
// Sleep forever if the timeout is longer than fits in a i64.
let deadline = timeout
.and_then(|d| {
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 73b9bef7e..16c8e0c0e 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -17,11 +17,9 @@
//! Once it has obtained all necessary pieces and brought any wrapper types into a state where they
//! can be safely bypassed it will attempt to use the `copy_file_range(2)`,
//! `sendfile(2)` or `splice(2)` syscalls to move data directly between file descriptors.
-//! Since those syscalls have requirements that cannot be fully checked in advance and
-//! gathering additional information about file descriptors would require additional syscalls
-//! anyway it simply attempts to use them one after another (guided by inaccurate hints) to
-//! figure out which one works and falls back to the generic read-write copy loop if none of them
-//! does.
+//! Since those syscalls have requirements that cannot be fully checked in advance it attempts
+//! to use them one after another (guided by hints) to figure out which one works and
+//! falls back to the generic read-write copy loop if none of them does.
//! Once a working syscall is found for a pair of file descriptors it will be called in a loop
//! until the copy operation is completed.
//!
@@ -84,14 +82,10 @@ pub(crate) fn copy_spec<R: Read + ?Sized, W: Write + ?Sized>(
/// The methods on this type only provide hints, due to `AsRawFd` and `FromRawFd` the inferred
/// type may be wrong.
enum FdMeta {
- /// We obtained the FD from a type that can contain any type of `FileType` and queried the metadata
- /// because it is cheaper than probing all possible syscalls (reader side)
Metadata(Metadata),
Socket,
Pipe,
- /// We don't have any metadata, e.g. because the original type was `File` which can represent
- /// any `FileType` and we did not query the metadata either since it did not seem beneficial
- /// (writer side)
+ /// We don't have any metadata because the stat syscall failed
NoneObtained,
}
@@ -131,6 +125,39 @@ impl FdMeta {
}
}
+/// Returns true if either changes made to the source after a sendfile/splice call won't become
+/// visible in the sink, or the source has explicitly opted into such behavior (e.g. by splicing
+/// a file into a pipe, the pipe being the source in this case).
+///
+/// This prevents File -> Pipe and File -> Socket splicing/sendfile optimizations, to uphold
+/// the Read/Write API semantics of io::copy.
+///
+/// Note: This is not 100% airtight; the caller can use the RawFd conversion methods to turn a
+/// regular file into a TCP socket, which will be treated as a socket here without checking.
+fn safe_kernel_copy(source: &FdMeta, sink: &FdMeta) -> bool {
+ match (source, sink) {
+ // Data arriving from a socket is safe because the sender can't modify the socket buffer.
+ // Data arriving from a pipe is safe(-ish) because either the sender *copied*
+ // the bytes into the pipe OR explicitly performed an operation that enables zero-copy,
+ // thus promising not to modify the data later.
+ (FdMeta::Socket, _) => true,
+ (FdMeta::Pipe, _) => true,
+ (FdMeta::Metadata(meta), _)
+ if meta.file_type().is_fifo() || meta.file_type().is_socket() =>
+ {
+ true
+ }
+ // Data going into non-pipes/non-sockets is safe because the "later changes may become visible" issue
+ // only happens for pages sitting in send buffers or pipes.
+ (_, FdMeta::Metadata(meta))
+ if !meta.file_type().is_fifo() && !meta.file_type().is_socket() =>
+ {
+ true
+ }
+ _ => false,
+ }
+}
+
struct CopyParams(FdMeta, Option<RawFd>);
struct Copier<'a, 'b, R: Read + ?Sized, W: Write + ?Sized> {
@@ -186,7 +213,8 @@ impl<R: CopyRead, W: CopyWrite> SpecCopy for Copier<'_, '_, R, W> {
// So we just try and fallback if needed.
// If current file offsets + write sizes overflow it may also fail, we do not try to fix that and instead
// fall back to the generic copy loop.
- if input_meta.potential_sendfile_source() {
+ if input_meta.potential_sendfile_source() && safe_kernel_copy(&input_meta, &output_meta)
+ {
let result = sendfile_splice(SpliceMode::Sendfile, readfd, writefd, max_write);
result.update_take(reader);
@@ -197,7 +225,9 @@ impl<R: CopyRead, W: CopyWrite> SpecCopy for Copier<'_, '_, R, W> {
}
}
- if input_meta.maybe_fifo() || output_meta.maybe_fifo() {
+ if (input_meta.maybe_fifo() || output_meta.maybe_fifo())
+ && safe_kernel_copy(&input_meta, &output_meta)
+ {
let result = sendfile_splice(SpliceMode::Splice, readfd, writefd, max_write);
result.update_take(reader);
@@ -298,13 +328,13 @@ impl CopyRead for &File {
impl CopyWrite for File {
fn properties(&self) -> CopyParams {
- CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
}
}
impl CopyWrite for &File {
fn properties(&self) -> CopyParams {
- CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ CopyParams(fd_to_meta(*self), Some(self.as_raw_fd()))
}
}
@@ -401,13 +431,13 @@ impl CopyRead for StdinLock<'_> {
impl CopyWrite for StdoutLock<'_> {
fn properties(&self) -> CopyParams {
- CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
}
}
impl CopyWrite for StderrLock<'_> {
fn properties(&self) -> CopyParams {
- CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+ CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
}
}
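
These specializations are reached through plain `io::copy`; with the new `safe_kernel_copy` check, file-to-file copies stay eligible for the fast path while file-to-pipe copies fall back to the read/write loop. A sketch of the entry point, with placeholder paths assumed to exist:

```rust
use std::fs::File;
use std::io;

// `io::copy` may be specialized to copy_file_range/sendfile/splice on Linux;
// for two regular files the kernel fast path remains allowed under the rule.
fn main() -> io::Result<()> {
    let mut src = File::open("/etc/hostname")?; // assumption: path exists
    let mut dst = File::create("/tmp/hostname.copy")?;
    let copied = io::copy(&mut src, &mut dst)?;
    println!("copied {copied} bytes");
    Ok(())
}
```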
diff --git a/library/std/src/sys/unix/kernel_copy/tests.rs b/library/std/src/sys/unix/kernel_copy/tests.rs
index 3fe849e23..a524270e3 100644
--- a/library/std/src/sys/unix/kernel_copy/tests.rs
+++ b/library/std/src/sys/unix/kernel_copy/tests.rs
@@ -83,6 +83,48 @@ fn copies_append_mode_sink() -> Result<()> {
Ok(())
}
+#[test]
+fn dont_splice_pipes_from_files() -> Result<()> {
+ // splicing to a pipe and then modifying the source could lead to changes
+ // becoming visible in an unexpected order.
+
+ use crate::io::SeekFrom;
+ use crate::os::unix::fs::FileExt;
+ use crate::process::{ChildStdin, ChildStdout};
+ use crate::sys_common::FromInner;
+
+ let (read_end, write_end) = crate::sys::pipe::anon_pipe()?;
+
+ let mut read_end = ChildStdout::from_inner(read_end);
+ let mut write_end = ChildStdin::from_inner(write_end);
+
+ let tmp_path = tmpdir();
+ let file = tmp_path.join("to_be_modified");
+ let mut file =
+ crate::fs::OpenOptions::new().create_new(true).read(true).write(true).open(file)?;
+
+ const SZ: usize = libc::PIPE_BUF as usize;
+
+ // put data in page cache
+ let mut buf: [u8; SZ] = [0x01; SZ];
+ file.write_all(&buf).unwrap();
+
+ // copy page into pipe
+ file.seek(SeekFrom::Start(0)).unwrap();
+ assert!(io::copy(&mut file, &mut write_end).unwrap() == SZ as u64);
+
+ // modify file
+ buf[0] = 0x02;
+ file.write_at(&buf, 0).unwrap();
+
+ // read from pipe
+ read_end.read_exact(buf.as_mut_slice()).unwrap();
+
+ assert_eq!(buf[0], 0x01, "data in pipe should reflect the original, not later modifications");
+
+ Ok(())
+}
+
#[bench]
fn bench_file_to_file_copy(b: &mut test::Bencher) {
const BYTES: usize = 128 * 1024;
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
index 8e05b618d..573bfa658 100644
--- a/library/std/src/sys/unix/net.rs
+++ b/library/std/src/sys/unix/net.rs
@@ -1,6 +1,6 @@
use crate::cmp;
use crate::ffi::CStr;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedBuf, BorrowedCursor, IoSlice, IoSliceMut};
use crate::mem;
use crate::net::{Shutdown, SocketAddr};
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
@@ -242,19 +242,35 @@ impl Socket {
self.0.duplicate().map(Socket)
}
- fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ fn recv_with_flags(&self, mut buf: BorrowedCursor<'_>, flags: c_int) -> io::Result<()> {
let ret = cvt(unsafe {
- libc::recv(self.as_raw_fd(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
+ libc::recv(
+ self.as_raw_fd(),
+ buf.as_mut().as_mut_ptr() as *mut c_void,
+ buf.capacity(),
+ flags,
+ )
})?;
- Ok(ret as usize)
+ unsafe {
+ buf.advance(ret as usize);
+ }
+ Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, 0)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), 0)?;
+ Ok(buf.len())
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, MSG_PEEK)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), MSG_PEEK)?;
+ Ok(buf.len())
+ }
+
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.recv_with_flags(buf, 0)
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
@@ -427,6 +443,17 @@ impl Socket {
Ok(passcred != 0)
}
+ #[cfg(target_os = "freebsd")]
+ pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
+ setsockopt(self, libc::AF_LOCAL, libc::LOCAL_CREDS_PERSISTENT, passcred as libc::c_int)
+ }
+
+ #[cfg(target_os = "freebsd")]
+ pub fn passcred(&self) -> io::Result<bool> {
+ let passcred: libc::c_int = getsockopt(self, libc::AF_LOCAL, libc::LOCAL_CREDS_PERSISTENT)?;
+ Ok(passcred != 0)
+ }
+
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as libc::c_int;
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index 21b035fb3..a345af76f 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -7,7 +7,6 @@ mod tests;
use crate::os::unix::prelude::*;
-use crate::convert::TryFrom;
use crate::error::Error as StdError;
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
@@ -115,7 +114,10 @@ pub fn set_errno(e: i32) {
/// Gets a detailed string description for the given error number.
pub fn error_string(errno: i32) -> String {
extern "C" {
- #[cfg_attr(any(target_os = "linux", target_env = "newlib"), link_name = "__xpg_strerror_r")]
+ #[cfg_attr(
+ all(any(target_os = "linux", target_env = "newlib"), not(target_env = "ohos")),
+ link_name = "__xpg_strerror_r"
+ )]
fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int;
}
diff --git a/library/std/src/sys/unix/pipe.rs b/library/std/src/sys/unix/pipe.rs
index a744d0ab6..dc17c9fac 100644
--- a/library/std/src/sys/unix/pipe.rs
+++ b/library/std/src/sys/unix/pipe.rs
@@ -1,4 +1,4 @@
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::mem;
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
use crate::sys::fd::FileDesc;
@@ -49,6 +49,10 @@ impl AnonPipe {
self.0.read(buf)
}
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0.read_buf(buf)
+ }
+
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0.read_vectored(bufs)
}
diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs
index d4c7e58b3..e45c380a0 100644
--- a/library/std/src/sys/unix/process/process_fuchsia.rs
+++ b/library/std/src/sys/unix/process/process_fuchsia.rs
@@ -166,7 +166,6 @@ impl Process {
}
pub fn wait(&mut self) -> io::Result<ExitStatus> {
- use crate::default::Default;
use crate::sys::process::zircon::*;
let mut proc_info: zx_info_process_t = Default::default();
@@ -199,7 +198,6 @@ impl Process {
}
pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
- use crate::default::Default;
use crate::sys::process::zircon::*;
let mut proc_info: zx_info_process_t = Default::default();
diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs
index a6fe07873..0f347ffab 100644
--- a/library/std/src/sys/unix/rand.rs
+++ b/library/std/src/sys/unix/rand.rs
@@ -20,7 +20,8 @@ pub fn hashmap_random_keys() -> (u64, u64) {
not(target_os = "netbsd"),
not(target_os = "fuchsia"),
not(target_os = "redox"),
- not(target_os = "vxworks")
+ not(target_os = "vxworks"),
+ not(target_os = "emscripten")
))]
mod imp {
use crate::fs::File;
@@ -174,7 +175,7 @@ mod imp {
}
}
-#[cfg(target_os = "openbsd")]
+#[cfg(any(target_os = "openbsd", target_os = "emscripten"))]
mod imp {
use crate::sys::os::errno;
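Emscripten reuses the OpenBSD implementation because both expose `getentropy(2)`. A self-contained sketch of that call pattern (the 256-byte cap per request is part of the getentropy contract; error handling is simplified here):

```rust
// Fill `buf` with OS randomness via getentropy(2); requests larger than
// 256 bytes are rejected by the syscall, so chunk the buffer.
fn fill_random(buf: &mut [u8]) {
    for chunk in buf.chunks_mut(256) {
        let ret = unsafe { libc::getentropy(chunk.as_mut_ptr().cast(), chunk.len()) };
        assert_eq!(ret, 0, "getentropy failed: {}", std::io::Error::last_os_error());
    }
}
```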
diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs
index b3626c564..a26f20795 100644
--- a/library/std/src/sys/unix/stdio.rs
+++ b/library/std/src/sys/unix/stdio.rs
@@ -1,4 +1,4 @@
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::mem::ManuallyDrop;
use crate::os::unix::io::FromRawFd;
use crate::sys::fd::FileDesc;
@@ -18,6 +18,10 @@ impl io::Read for Stdin {
unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDIN_FILENO)).read(buf) }
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDIN_FILENO)).read_buf(buf) }
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
unsafe { ManuallyDrop::new(FileDesc::from_raw_fd(libc::STDIN_FILENO)).read_vectored(bufs) }
}
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index 0f11de8f5..6f5358340 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -174,6 +174,34 @@ impl From<libc::timespec> for Timespec {
}
}
+#[cfg(all(
+ target_os = "linux",
+ target_env = "gnu",
+ target_pointer_width = "32",
+ not(target_arch = "riscv32")
+))]
+#[repr(C)]
+pub(in crate::sys::unix) struct __timespec64 {
+ pub(in crate::sys::unix) tv_sec: i64,
+ #[cfg(target_endian = "big")]
+ _padding: i32,
+ pub(in crate::sys::unix) tv_nsec: i32,
+ #[cfg(target_endian = "little")]
+ _padding: i32,
+}
+
+#[cfg(all(
+ target_os = "linux",
+ target_env = "gnu",
+ target_pointer_width = "32",
+ not(target_arch = "riscv32")
+))]
+impl From<__timespec64> for Timespec {
+ fn from(t: __timespec64) -> Timespec {
+ Timespec::new(t.tv_sec, t.tv_nsec.into())
+ }
+}
+
#[cfg(any(
all(target_os = "macos", any(not(target_arch = "aarch64"))),
target_os = "ios",
@@ -352,29 +380,23 @@ mod inner {
impl Timespec {
pub fn now(clock: libc::clockid_t) -> Timespec {
// Try to use 64-bit time in preparation for Y2038.
- #[cfg(all(target_os = "linux", target_env = "gnu", target_pointer_width = "32"))]
+ #[cfg(all(
+ target_os = "linux",
+ target_env = "gnu",
+ target_pointer_width = "32",
+ not(target_arch = "riscv32")
+ ))]
{
use crate::sys::weak::weak;
// __clock_gettime64 was added to 32-bit arches in glibc 2.34,
// and it handles both vDSO calls and ENOSYS fallbacks itself.
- weak!(fn __clock_gettime64(libc::clockid_t, *mut __timespec64) -> libc::c_int);
-
- #[repr(C)]
- struct __timespec64 {
- tv_sec: i64,
- #[cfg(target_endian = "big")]
- _padding: i32,
- tv_nsec: i32,
- #[cfg(target_endian = "little")]
- _padding: i32,
- }
+ weak!(fn __clock_gettime64(libc::clockid_t, *mut super::__timespec64) -> libc::c_int);
if let Some(clock_gettime64) = __clock_gettime64.get() {
let mut t = MaybeUninit::uninit();
cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap();
- let t = unsafe { t.assume_init() };
- return Timespec::new(t.tv_sec, t.tv_nsec as i64);
+ return Timespec::from(unsafe { t.assume_init() });
}
}
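Hoisting `__timespec64` out of the function lets the new `From` impl share it with other call sites. The `weak!` lookup resolves the symbol at runtime so one libstd binary works against glibc both older and newer than 2.34; roughly what that probe amounts to, written against a raw `dlsym` lookup (illustrative only; std's `weak!` caches the result in an atomic):

```rust
use core::mem::MaybeUninit;

#[repr(C)]
struct Timespec64 {
    // mirrors the __timespec64 layout above (little-endian variant shown)
    tv_sec: i64,
    tv_nsec: i32,
    _padding: i32,
}

type ClockGettime64 = unsafe extern "C" fn(libc::clockid_t, *mut Timespec64) -> libc::c_int;

fn now_64bit(clock: libc::clockid_t) -> Option<(i64, i32)> {
    let sym =
        unsafe { libc::dlsym(libc::RTLD_DEFAULT, b"__clock_gettime64\0".as_ptr().cast()) };
    if sym.is_null() {
        return None; // glibc < 2.34: fall back to the 32-bit clock_gettime
    }
    let f: ClockGettime64 = unsafe { core::mem::transmute(sym) };
    let mut t = MaybeUninit::<Timespec64>::uninit();
    if unsafe { f(clock, t.as_mut_ptr()) } != 0 {
        return None;
    }
    let t = unsafe { t.assume_init() };
    Some((t.tv_sec, t.tv_nsec))
}
```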
diff --git a/library/std/src/sys/unsupported/net.rs b/library/std/src/sys/unsupported/net.rs
index a5204a084..bbc52703f 100644
--- a/library/std/src/sys/unsupported/net.rs
+++ b/library/std/src/sys/unsupported/net.rs
@@ -1,5 +1,5 @@
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
use crate::sys::unsupported;
use crate::time::Duration;
@@ -39,6 +39,10 @@ impl TcpStream {
self.0
}
+ pub fn read_buf(&self, _buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0
+ }
+
pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0
}
diff --git a/library/std/src/sys/unsupported/pipe.rs b/library/std/src/sys/unsupported/pipe.rs
index 0bba673b4..d7d8f297a 100644
--- a/library/std/src/sys/unsupported/pipe.rs
+++ b/library/std/src/sys/unsupported/pipe.rs
@@ -1,4 +1,4 @@
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
pub struct AnonPipe(!);
@@ -7,6 +7,10 @@ impl AnonPipe {
self.0
}
+ pub fn read_buf(&self, _buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0
+ }
+
pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0
}
diff --git a/library/std/src/sys/wasi/fd.rs b/library/std/src/sys/wasi/fd.rs
index 0b9c8e61d..191db4b60 100644
--- a/library/std/src/sys/wasi/fd.rs
+++ b/library/std/src/sys/wasi/fd.rs
@@ -2,7 +2,7 @@
#![allow(dead_code)]
use super::err2io;
-use crate::io::{self, IoSlice, IoSliceMut, SeekFrom};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::mem;
use crate::net::Shutdown;
use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
@@ -46,6 +46,22 @@ impl WasiFd {
unsafe { wasi::fd_read(self.as_raw_fd() as wasi::Fd, iovec(bufs)).map_err(err2io) }
}
+ pub fn read_buf(&self, mut buf: BorrowedCursor<'_>) -> io::Result<()> {
+ unsafe {
+ let bufs = [wasi::Iovec {
+ buf: buf.as_mut().as_mut_ptr() as *mut u8,
+ buf_len: buf.capacity(),
+ }];
+ match wasi::fd_read(self.as_raw_fd() as wasi::Fd, &bufs) {
+ Ok(n) => {
+ buf.advance(n);
+ Ok(())
+ }
+ Err(e) => Err(err2io(e)),
+ }
+ }
+ }
+
pub fn write(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
unsafe { wasi::fd_write(self.as_raw_fd() as wasi::Fd, ciovec(bufs)).map_err(err2io) }
}
diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs
index d4866bbc3..3a205267e 100644
--- a/library/std/src/sys/wasi/fs.rs
+++ b/library/std/src/sys/wasi/fs.rs
@@ -441,7 +441,7 @@ impl File {
}
pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
- crate::io::default_read_buf(|buf| self.read(buf), cursor)
+ self.fd.read_buf(cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
diff --git a/library/std/src/sys/wasi/net.rs b/library/std/src/sys/wasi/net.rs
index cf4ebba1a..59d94a368 100644
--- a/library/std/src/sys/wasi/net.rs
+++ b/library/std/src/sys/wasi/net.rs
@@ -3,7 +3,7 @@
use super::err2io;
use super::fd::WasiFd;
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut};
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
use crate::sys::unsupported;
@@ -91,6 +91,10 @@ impl TcpStream {
self.read_vectored(&mut [IoSliceMut::new(buf)])
}
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.socket().as_inner().read_buf(buf)
+ }
+
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.socket().as_inner().read(bufs)
}
diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs
index 30356fa85..43c0cdb65 100644
--- a/library/std/src/sys/windows/args.rs
+++ b/library/std/src/sys/windows/args.rs
@@ -11,10 +11,11 @@ use crate::fmt;
use crate::io;
use crate::num::NonZeroU16;
use crate::os::windows::prelude::*;
-use crate::path::PathBuf;
-use crate::sys::c;
+use crate::path::{Path, PathBuf};
+use crate::sys::path::get_long_path;
use crate::sys::process::ensure_no_nuls;
use crate::sys::windows::os::current_exe;
+use crate::sys::{c, to_u16s};
use crate::sys_common::wstr::WStrUnits;
use crate::vec;
@@ -311,7 +312,7 @@ pub(crate) fn make_bat_command_line(
/// Takes a path and tries to return a non-verbatim path.
///
/// This is necessary because cmd.exe does not support verbatim paths.
-pub(crate) fn to_user_path(mut path: Vec<u16>) -> io::Result<Vec<u16>> {
+pub(crate) fn to_user_path(path: &Path) -> io::Result<Vec<u16>> {
use crate::ptr;
use crate::sys::windows::fill_utf16_buf;
@@ -324,6 +325,8 @@ pub(crate) fn to_user_path(mut path: Vec<u16>) -> io::Result<Vec<u16>> {
const N: u16 = b'N' as _;
const C: u16 = b'C' as _;
+ let mut path = to_u16s(path)?;
+
// Early return if the path is too long to remove the verbatim prefix.
const LEGACY_MAX_PATH: usize = 260;
if path.len() > LEGACY_MAX_PATH {
@@ -337,7 +340,13 @@ pub(crate) fn to_user_path(mut path: Vec<u16>) -> io::Result<Vec<u16>> {
fill_utf16_buf(
|buffer, size| c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()),
|full_path: &[u16]| {
- if full_path == &path[4..path.len() - 1] { full_path.into() } else { path }
+ if full_path == &path[4..path.len() - 1] {
+ let mut path: Vec<u16> = full_path.into();
+ path.push(0);
+ path
+ } else {
+ path
+ }
},
)
},
@@ -350,7 +359,9 @@ pub(crate) fn to_user_path(mut path: Vec<u16>) -> io::Result<Vec<u16>> {
|buffer, size| c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut()),
|full_path: &[u16]| {
if full_path == &path[6..path.len() - 1] {
- full_path.into()
+ let mut path: Vec<u16> = full_path.into();
+ path.push(0);
+ path
} else {
// Restore the 'C' in "UNC".
path[6] = b'C' as u16;
@@ -360,6 +371,6 @@ pub(crate) fn to_user_path(mut path: Vec<u16>) -> io::Result<Vec<u16>> {
)
},
// For everything else, leave the path unchanged.
- _ => Ok(path),
+ _ => get_long_path(path, false),
}
}
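Besides taking `&Path` directly, `to_user_path` now appends the trailing NUL to the `GetFullPathNameW` result before returning it, which the earlier code omitted. The overall policy is a small predicate; a hypothetical sketch over `str` rather than UTF-16 (the helper name and the normalization check are invented for illustration):

```rust
// Drop a verbatim prefix only when the unprefixed path is short and
// already normalized, since cmd.exe cannot handle \\?\ paths.
fn can_strip_verbatim(path: &str) -> bool {
    const LEGACY_MAX_PATH: usize = 260; // includes the trailing NUL
    match path.strip_prefix(r"\\?\") {
        Some(rest) => {
            rest.len() + 1 <= LEGACY_MAX_PATH
                && !rest.split('\\').any(|c| c == "." || c == "..")
        }
        None => false,
    }
}
```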
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 5d150eca0..1f4092ad7 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -296,7 +296,6 @@ pub const STATUS_INVALID_PARAMETER: NTSTATUS = 0xc000000d_u32 as _;
pub const STATUS_PENDING: NTSTATUS = 0x103 as _;
pub const STATUS_END_OF_FILE: NTSTATUS = 0xC0000011_u32 as _;
-pub const STATUS_NOT_IMPLEMENTED: NTSTATUS = 0xC0000002_u32 as _;
// Equivalent to the `NT_SUCCESS` C preprocessor macro.
// See: https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/using-ntstatus-values
@@ -1282,6 +1281,46 @@ extern "system" {
) -> NTSTATUS;
}
+#[link(name = "ntdll")]
+extern "system" {
+ pub fn NtCreateFile(
+ FileHandle: *mut HANDLE,
+ DesiredAccess: ACCESS_MASK,
+ ObjectAttributes: *const OBJECT_ATTRIBUTES,
+ IoStatusBlock: *mut IO_STATUS_BLOCK,
+ AllocationSize: *mut i64,
+ FileAttributes: ULONG,
+ ShareAccess: ULONG,
+ CreateDisposition: ULONG,
+ CreateOptions: ULONG,
+ EaBuffer: *mut c_void,
+ EaLength: ULONG,
+ ) -> NTSTATUS;
+ pub fn NtReadFile(
+ FileHandle: BorrowedHandle<'_>,
+ Event: HANDLE,
+ ApcRoutine: Option<IO_APC_ROUTINE>,
+ ApcContext: *mut c_void,
+ IoStatusBlock: &mut IO_STATUS_BLOCK,
+ Buffer: *mut crate::mem::MaybeUninit<u8>,
+ Length: ULONG,
+ ByteOffset: Option<&LARGE_INTEGER>,
+ Key: Option<&ULONG>,
+ ) -> NTSTATUS;
+ pub fn NtWriteFile(
+ FileHandle: BorrowedHandle<'_>,
+ Event: HANDLE,
+ ApcRoutine: Option<IO_APC_ROUTINE>,
+ ApcContext: *mut c_void,
+ IoStatusBlock: &mut IO_STATUS_BLOCK,
+ Buffer: *const u8,
+ Length: ULONG,
+ ByteOffset: Option<&LARGE_INTEGER>,
+ Key: Option<&ULONG>,
+ ) -> NTSTATUS;
+ pub fn RtlNtStatusToDosError(Status: NTSTATUS) -> ULONG;
+}
+
// Functions that aren't available on every version of Windows that we support,
// but we still use them and just provide some form of a fallback implementation.
compat_fn_with_fallback! {
@@ -1322,52 +1361,6 @@ compat_fn_optional! {
compat_fn_with_fallback! {
pub static NTDLL: &CStr = ansi_str!("ntdll");
- pub fn NtCreateFile(
- FileHandle: *mut HANDLE,
- DesiredAccess: ACCESS_MASK,
- ObjectAttributes: *const OBJECT_ATTRIBUTES,
- IoStatusBlock: *mut IO_STATUS_BLOCK,
- AllocationSize: *mut i64,
- FileAttributes: ULONG,
- ShareAccess: ULONG,
- CreateDisposition: ULONG,
- CreateOptions: ULONG,
- EaBuffer: *mut c_void,
- EaLength: ULONG
- ) -> NTSTATUS {
- STATUS_NOT_IMPLEMENTED
- }
- pub fn NtReadFile(
- FileHandle: BorrowedHandle<'_>,
- Event: HANDLE,
- ApcRoutine: Option<IO_APC_ROUTINE>,
- ApcContext: *mut c_void,
- IoStatusBlock: &mut IO_STATUS_BLOCK,
- Buffer: *mut crate::mem::MaybeUninit<u8>,
- Length: ULONG,
- ByteOffset: Option<&LARGE_INTEGER>,
- Key: Option<&ULONG>
- ) -> NTSTATUS {
- STATUS_NOT_IMPLEMENTED
- }
- pub fn NtWriteFile(
- FileHandle: BorrowedHandle<'_>,
- Event: HANDLE,
- ApcRoutine: Option<IO_APC_ROUTINE>,
- ApcContext: *mut c_void,
- IoStatusBlock: &mut IO_STATUS_BLOCK,
- Buffer: *const u8,
- Length: ULONG,
- ByteOffset: Option<&LARGE_INTEGER>,
- Key: Option<&ULONG>
- ) -> NTSTATUS {
- STATUS_NOT_IMPLEMENTED
- }
- pub fn RtlNtStatusToDosError(
- Status: NTSTATUS
- ) -> ULONG {
- Status as ULONG
- }
pub fn NtCreateKeyedEvent(
KeyedEventHandle: LPHANDLE,
DesiredAccess: ACCESS_MASK,
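With `NtCreateFile`, `NtReadFile`, `NtWriteFile`, and `RtlNtStatusToDosError` linked directly from ntdll instead of going through `STATUS_NOT_IMPLEMENTED` fallback stubs, callers can translate failures through the Win32 error space. A sketch using the bindings declared above (not the exact std call sites):

```rust
// Map an NTSTATUS failure from NtReadFile/NtWriteFile into an io::Error
// by first converting it to a Win32 (DOS) error code.
fn nt_status_to_io_error(status: NTSTATUS) -> std::io::Error {
    let dos_code = unsafe { RtlNtStatusToDosError(status) };
    std::io::Error::from_raw_os_error(dos_code as i32)
}
```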
diff --git a/library/std/src/sys/windows/c/errors.rs b/library/std/src/sys/windows/c/errors.rs
index 23dcc119d..ad8da19b6 100644
--- a/library/std/src/sys/windows/c/errors.rs
+++ b/library/std/src/sys/windows/c/errors.rs
@@ -12,7 +12,7 @@ pub const ERROR_RESOURCE_CALL_TIMED_OUT: DWORD = 5910;
pub const FRS_ERR_SYSVOL_POPULATE_TIMEOUT: DWORD = 8014;
pub const DNS_ERROR_RECORD_TIMED_OUT: DWORD = 9705;
-// The followiung list was obtained from
+// The following list was obtained from
// `/usr/x86_64-w64-mingw32/include/winerror.h`
// in the Debian package
// mingw-w64_6.0.0-3_all.deb
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index d2c597664..956db577d 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -1236,7 +1236,17 @@ pub fn link(_original: &Path, _link: &Path) -> io::Result<()> {
}
pub fn stat(path: &Path) -> io::Result<FileAttr> {
- metadata(path, ReparsePoint::Follow)
+ match metadata(path, ReparsePoint::Follow) {
+ Err(err) if err.raw_os_error() == Some(c::ERROR_CANT_ACCESS_FILE as i32) => {
+ if let Ok(attrs) = lstat(path) {
+ if !attrs.file_type().is_symlink() {
+ return Ok(attrs);
+ }
+ }
+ Err(err)
+ }
+ result => result,
+ }
}
pub fn lstat(path: &Path) -> io::Result<FileAttr> {
@@ -1393,24 +1403,40 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT | c::FILE_FLAG_BACKUP_SEMANTICS);
let f = File::open(junction, &opts)?;
let h = f.as_inner().as_raw_handle();
-
unsafe {
let mut data = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]);
let data_ptr = data.0.as_mut_ptr();
+ let data_end = data_ptr.add(c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE);
let db = data_ptr.cast::<c::REPARSE_MOUNTPOINT_DATA_BUFFER>();
// Zero the header to ensure it's fully initialized, including reserved parameters.
*db = mem::zeroed();
- let buf = ptr::addr_of_mut!((*db).ReparseTarget).cast::<c::WCHAR>();
- let mut i = 0;
+ let reparse_target_slice = {
+ let buf_start = ptr::addr_of_mut!((*db).ReparseTarget).cast::<c::WCHAR>();
+ // Compute offset in bytes and then divide so that we round down
+ // rather than hit any UB (admittedly this arithmetic should work
+ // out so that this isn't necessary)
+ let buf_len_bytes = usize::try_from(data_end.byte_offset_from(buf_start)).unwrap();
+ let buf_len_wchars = buf_len_bytes / core::mem::size_of::<c::WCHAR>();
+ core::slice::from_raw_parts_mut(buf_start, buf_len_wchars)
+ };
+
// FIXME: this conversion is very hacky
- let v = br"\??\";
- let v = v.iter().map(|x| *x as u16);
- for c in v.chain(original.as_os_str().encode_wide()) {
- *buf.add(i) = c;
+ let iter = br"\??\"
+ .iter()
+ .map(|x| *x as u16)
+ .chain(original.as_os_str().encode_wide())
+ .chain(core::iter::once(0));
+ let mut i = 0;
+ for c in iter {
+ if i >= reparse_target_slice.len() {
+ return Err(crate::io::const_io_error!(
+ crate::io::ErrorKind::InvalidFilename,
+ "Input filename is too long"
+ ));
+ }
+ reparse_target_slice[i] = c;
i += 1;
}
- *buf.add(i) = 0;
- i += 1;
(*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
(*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;
(*db).ReparseTargetLength = ((i - 1) * 2) as c::WORD;
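The junction rewrite replaces unchecked pointer writes with a bounds-checked slice over the tail of the reparse buffer, returning `InvalidFilename` instead of overrunning it. The slice-derivation step in isolation (same round-down logic as above; both pointers must come from the same allocation):

```rust
use core::{mem, slice};

/// Largest u16 window between `start` and `end`, rounding the byte
/// distance down so a trailing odd byte is simply left unused.
///
/// Safety: `start` and `end` must lie within the same live allocation,
/// with `start <= end`, and the region must be writable for `'a`.
unsafe fn remaining_wchars<'a>(start: *mut u16, end: *const u8) -> &'a mut [u16] {
    let bytes = usize::try_from(end.byte_offset_from(start as *const u8)).unwrap();
    slice::from_raw_parts_mut(start, bytes / mem::size_of::<u16>())
}
```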
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
index ae33d48c6..b290f4070 100644
--- a/library/std/src/sys/windows/handle.rs
+++ b/library/std/src/sys/windows/handle.rs
@@ -327,7 +327,16 @@ impl<'a> Read for &'a Handle {
(**self).read(buf)
}
+ fn read_buf(&mut self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ (**self).read_buf(buf)
+ }
+
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
(**self).read_vectored(bufs)
}
+
+ #[inline]
+ fn is_read_vectored(&self) -> bool {
+ (**self).is_read_vectored()
+ }
}
diff --git a/library/std/src/sys/windows/net.rs b/library/std/src/sys/windows/net.rs
index e0701a498..ee1f5482b 100644
--- a/library/std/src/sys/windows/net.rs
+++ b/library/std/src/sys/windows/net.rs
@@ -1,7 +1,7 @@
#![unstable(issue = "none", feature = "windows_net")]
use crate::cmp;
-use crate::io::{self, IoSlice, IoSliceMut, Read};
+use crate::io::{self, BorrowedBuf, BorrowedCursor, IoSlice, IoSliceMut, Read};
use crate::mem;
use crate::net::{Shutdown, SocketAddr};
use crate::os::windows::io::{
@@ -214,28 +214,38 @@ impl Socket {
Ok(Self(self.0.try_clone()?))
}
- fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ fn recv_with_flags(&self, mut buf: BorrowedCursor<'_>, flags: c_int) -> io::Result<()> {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
- let length = cmp::min(buf.len(), i32::MAX as usize) as i32;
- let result =
- unsafe { c::recv(self.as_raw_socket(), buf.as_mut_ptr() as *mut _, length, flags) };
+ let length = cmp::min(buf.capacity(), i32::MAX as usize) as i32;
+ let result = unsafe {
+ c::recv(self.as_raw_socket(), buf.as_mut().as_mut_ptr() as *mut _, length, flags)
+ };
match result {
c::SOCKET_ERROR => {
let error = unsafe { c::WSAGetLastError() };
if error == c::WSAESHUTDOWN {
- Ok(0)
+ Ok(())
} else {
Err(io::Error::from_raw_os_error(error))
}
}
- _ => Ok(result as usize),
+ _ => {
+ unsafe { buf.advance(result as usize) };
+ Ok(())
+ }
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), 0)?;
+ Ok(buf.len())
+ }
+
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
self.recv_with_flags(buf, 0)
}
@@ -277,7 +287,9 @@ impl Socket {
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.recv_with_flags(buf, c::MSG_PEEK)
+ let mut buf = BorrowedBuf::from(buf);
+ self.recv_with_flags(buf.unfilled(), c::MSG_PEEK)?;
+ Ok(buf.len())
}
fn recv_from_with_flags(
diff --git a/library/std/src/sys/windows/path.rs b/library/std/src/sys/windows/path.rs
index beeca1917..c3573d14c 100644
--- a/library/std/src/sys/windows/path.rs
+++ b/library/std/src/sys/windows/path.rs
@@ -220,6 +220,19 @@ fn parse_next_component(path: &OsStr, verbatim: bool) -> (&OsStr, &OsStr) {
///
/// This path may or may not have a verbatim prefix.
pub(crate) fn maybe_verbatim(path: &Path) -> io::Result<Vec<u16>> {
+ let path = to_u16s(path)?;
+ get_long_path(path, true)
+}
+
+/// Get a normalized absolute path that can bypass path length limits.
+///
+/// Setting prefer_verbatim to true suggests a stronger preference for verbatim
+/// paths even when not strictly necessary. This allows the Windows API to avoid
+/// repeating our work. However, if the path may be given back to users or
+/// passed to other applications, then it's preferable to use non-verbatim paths
+/// when possible. Non-verbatim paths are better understood by users and handled
+/// by more software.
+pub(crate) fn get_long_path(mut path: Vec<u16>, prefer_verbatim: bool) -> io::Result<Vec<u16>> {
// Normally the MAX_PATH is 260 UTF-16 code units (including the NULL).
// However, for APIs such as CreateDirectory[1], the limit is 248.
//
@@ -243,7 +256,6 @@ pub(crate) fn maybe_verbatim(path: &Path) -> io::Result<Vec<u16>> {
// \\?\UNC\
const UNC_PREFIX: &[u16] = &[SEP, SEP, QUERY, SEP, U, N, C, SEP];
- let mut path = to_u16s(path)?;
if path.starts_with(VERBATIM_PREFIX) || path.starts_with(NT_PREFIX) || path == &[0] {
// Early return for paths that are already verbatim or empty.
return Ok(path);
@@ -275,29 +287,34 @@ pub(crate) fn maybe_verbatim(path: &Path) -> io::Result<Vec<u16>> {
|mut absolute| {
path.clear();
- // Secondly, add the verbatim prefix. This is easier here because we know the
- // path is now absolute and fully normalized (e.g. `/` has been changed to `\`).
- let prefix = match absolute {
- // C:\ => \\?\C:\
- [_, COLON, SEP, ..] => VERBATIM_PREFIX,
- // \\.\ => \\?\
- [SEP, SEP, DOT, SEP, ..] => {
- absolute = &absolute[4..];
- VERBATIM_PREFIX
- }
- // Leave \\?\ and \??\ as-is.
- [SEP, SEP, QUERY, SEP, ..] | [SEP, QUERY, QUERY, SEP, ..] => &[],
- // \\ => \\?\UNC\
- [SEP, SEP, ..] => {
- absolute = &absolute[2..];
- UNC_PREFIX
- }
- // Anything else we leave alone.
- _ => &[],
- };
-
- path.reserve_exact(prefix.len() + absolute.len() + 1);
- path.extend_from_slice(prefix);
+ // Only prepend the prefix if needed.
+ if prefer_verbatim || absolute.len() + 1 >= LEGACY_MAX_PATH {
+ // Secondly, add the verbatim prefix. This is easier here because we know the
+ // path is now absolute and fully normalized (e.g. `/` has been changed to `\`).
+ let prefix = match absolute {
+ // C:\ => \\?\C:\
+ [_, COLON, SEP, ..] => VERBATIM_PREFIX,
+ // \\.\ => \\?\
+ [SEP, SEP, DOT, SEP, ..] => {
+ absolute = &absolute[4..];
+ VERBATIM_PREFIX
+ }
+ // Leave \\?\ and \??\ as-is.
+ [SEP, SEP, QUERY, SEP, ..] | [SEP, QUERY, QUERY, SEP, ..] => &[],
+ // \\ => \\?\UNC\
+ [SEP, SEP, ..] => {
+ absolute = &absolute[2..];
+ UNC_PREFIX
+ }
+ // Anything else we leave alone.
+ _ => &[],
+ };
+
+ path.reserve_exact(prefix.len() + absolute.len() + 1);
+ path.extend_from_slice(prefix);
+ } else {
+ path.reserve_exact(absolute.len() + 1);
+ }
path.extend_from_slice(absolute);
path.push(0);
},
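The prefix decision itself is compact enough to state separately. Verbatim paths let later Windows API calls skip normalization, while non-verbatim paths are safer to show to users or hand to other programs; a distilled form of the check above:

```rust
// Prefix with \\?\ when the caller prefers verbatim, or when the
// normalized absolute form is still too long for legacy APIs.
fn needs_verbatim_prefix(absolute_len: usize, prefer_verbatim: bool) -> bool {
    const LEGACY_MAX_PATH: usize = 260; // includes the trailing NUL
    prefer_verbatim || absolute_len + 1 >= LEGACY_MAX_PATH
}
```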
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
index 7b25edaa5..0780b2958 100644
--- a/library/std/src/sys/windows/pipe.rs
+++ b/library/std/src/sys/windows/pipe.rs
@@ -1,7 +1,7 @@
use crate::os::windows::prelude::*;
use crate::ffi::OsStr;
-use crate::io::{self, IoSlice, IoSliceMut, Read};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, Read};
use crate::mem;
use crate::path::Path;
use crate::ptr;
@@ -252,6 +252,28 @@ impl AnonPipe {
}
}
+ pub fn read_buf(&self, mut buf: BorrowedCursor<'_>) -> io::Result<()> {
+ let result = unsafe {
+ let len = crate::cmp::min(buf.capacity(), c::DWORD::MAX as usize) as c::DWORD;
+ self.alertable_io_internal(c::ReadFileEx, buf.as_mut().as_mut_ptr() as _, len)
+ };
+
+ match result {
+ // The special treatment of BrokenPipe is to deal with Windows
+ // pipe semantics, which yields this error when *reading* from
+ // a pipe after the other end has closed; we interpret that as
+ // EOF on the pipe.
+ Err(ref e) if e.kind() == io::ErrorKind::BrokenPipe => Ok(()),
+ Err(e) => Err(e),
+ Ok(n) => {
+ unsafe {
+ buf.advance(n);
+ }
+ Ok(())
+ }
+ }
+ }
+
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.read_vectored(bufs)
}
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 10bc949e1..1c73b64e2 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -266,11 +266,7 @@ impl Command {
let (program, mut cmd_str) = if is_batch_file {
(
command_prompt()?,
- args::make_bat_command_line(
- &args::to_user_path(program)?,
- &self.args,
- self.force_quotes_enabled,
- )?,
+ args::make_bat_command_line(&program, &self.args, self.force_quotes_enabled)?,
)
} else {
let cmd_str = make_command_line(&self.program, &self.args, self.force_quotes_enabled)?;
@@ -410,7 +406,7 @@ fn resolve_exe<'a>(
if has_exe_suffix {
// The application name is a path to a `.exe` file.
// Let `CreateProcessW` figure out if it exists or not.
- return path::maybe_verbatim(Path::new(exe_path));
+ return args::to_user_path(Path::new(exe_path));
}
let mut path = PathBuf::from(exe_path);
@@ -422,7 +418,7 @@ fn resolve_exe<'a>(
// It's ok to use `set_extension` here because the intent is to
// remove the extension that was just added.
path.set_extension("");
- return path::maybe_verbatim(&path);
+ return args::to_user_path(&path);
}
} else {
ensure_no_nuls(exe_path)?;
@@ -510,7 +506,7 @@ where
/// Check if a file exists without following symlinks.
fn program_exists(path: &Path) -> Option<Vec<u16>> {
unsafe {
- let path = path::maybe_verbatim(path).ok()?;
+ let path = args::to_user_path(path).ok()?;
// Getting attributes using `GetFileAttributesW` does not follow symlinks
// and it will almost always be successful if the link exists.
// There are some exceptions for special system files (e.g. the pagefile)
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
index 85ecc1def..cb24caa1e 100644
--- a/library/std/src/sys_common/net.rs
+++ b/library/std/src/sys_common/net.rs
@@ -2,9 +2,8 @@
mod tests;
use crate::cmp;
-use crate::convert::{TryFrom, TryInto};
use crate::fmt;
-use crate::io::{self, ErrorKind, IoSlice, IoSliceMut};
+use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut};
use crate::mem;
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
use crate::ptr;
@@ -272,6 +271,10 @@ impl TcpStream {
self.inner.read(buf)
}
+ pub fn read_buf(&self, buf: BorrowedCursor<'_>) -> io::Result<()> {
+ self.inner.read_buf(buf)
+ }
+
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.read_vectored(bufs)
}
diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs
index 575988ec7..15042fc3b 100644
--- a/library/std/src/sys_common/thread_parking/id.rs
+++ b/library/std/src/sys_common/thread_parking/id.rs
@@ -79,7 +79,7 @@ impl Parker {
park_timeout(dur, self.state.as_ptr().addr());
// Swap to ensure that we observe all state changes with acquire
// ordering, even if the state has been changed after the timeout
- // occured.
+ // occurred.
self.state.swap(EMPTY, Acquire);
}
}
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index cf7c2e05a..7fdf03acc 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -173,200 +173,6 @@ macro_rules! thread_local {
);
}
-#[doc(hidden)]
-#[unstable(feature = "thread_local_internals", reason = "should not be necessary", issue = "none")]
-#[macro_export]
-#[allow_internal_unstable(thread_local_internals, cfg_target_thread_local, thread_local)]
-#[allow_internal_unsafe]
-macro_rules! __thread_local_inner {
- // used to generate the `LocalKey` value for const-initialized thread locals
- (@key $t:ty, const $init:expr) => {{
- #[cfg_attr(not(windows), inline)] // see comments below
- #[deny(unsafe_op_in_unsafe_fn)]
- unsafe fn __getit(
- _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
- ) -> $crate::option::Option<&'static $t> {
- const INIT_EXPR: $t = $init;
-
- // wasm without atomics maps directly to `static mut`, and dtors
- // aren't implemented because thread dtors aren't really a thing
- // on wasm right now
- //
- // FIXME(#84224) this should come after the `target_thread_local`
- // block.
- #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
- {
- static mut VAL: $t = INIT_EXPR;
- unsafe { $crate::option::Option::Some(&VAL) }
- }
-
- // If the platform has support for `#[thread_local]`, use it.
- #[cfg(all(
- target_thread_local,
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
- ))]
- {
- #[thread_local]
- static mut VAL: $t = INIT_EXPR;
-
- // If a dtor isn't needed we can do something "very raw" and
- // just get going.
- if !$crate::mem::needs_drop::<$t>() {
- unsafe {
- return $crate::option::Option::Some(&VAL)
- }
- }
-
- // 0 == dtor not registered
- // 1 == dtor registered, dtor not run
- // 2 == dtor registered and is running or has run
- #[thread_local]
- static mut STATE: $crate::primitive::u8 = 0;
-
- unsafe extern "C" fn destroy(ptr: *mut $crate::primitive::u8) {
- let ptr = ptr as *mut $t;
-
- unsafe {
- $crate::debug_assert_eq!(STATE, 1);
- STATE = 2;
- $crate::ptr::drop_in_place(ptr);
- }
- }
-
- unsafe {
- match STATE {
- // 0 == we haven't registered a destructor, so do
- // so now.
- 0 => {
- $crate::thread::__FastLocalKeyInner::<$t>::register_dtor(
- $crate::ptr::addr_of_mut!(VAL) as *mut $crate::primitive::u8,
- destroy,
- );
- STATE = 1;
- $crate::option::Option::Some(&VAL)
- }
- // 1 == the destructor is registered and the value
- // is valid, so return the pointer.
- 1 => $crate::option::Option::Some(&VAL),
- // otherwise the destructor has already run, so we
- // can't give access.
- _ => $crate::option::Option::None,
- }
- }
- }
-
- // On platforms without `#[thread_local]` we fall back to the
- // same implementation as below for os thread locals.
- #[cfg(all(
- not(target_thread_local),
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
- ))]
- {
- #[inline]
- const fn __init() -> $t { INIT_EXPR }
- static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
- $crate::thread::__OsLocalKeyInner::new();
- #[allow(unused_unsafe)]
- unsafe {
- __KEY.get(move || {
- if let $crate::option::Option::Some(init) = _init {
- if let $crate::option::Option::Some(value) = init.take() {
- return value;
- } else if $crate::cfg!(debug_assertions) {
- $crate::unreachable!("missing initial value");
- }
- }
- __init()
- })
- }
- }
- }
-
- unsafe {
- $crate::thread::LocalKey::new(__getit)
- }
- }};
-
- // used to generate the `LocalKey` value for `thread_local!`
- (@key $t:ty, $init:expr) => {
- {
- #[inline]
- fn __init() -> $t { $init }
-
- // When reading this function you might ask "why is this inlined
- // everywhere other than Windows?", and that's a very reasonable
- // question to ask. The short story is that it segfaults rustc if
- // this function is inlined. The longer story is that Windows looks
- // to not support `extern` references to thread locals across DLL
- // boundaries. This appears to at least not be supported in the ABI
- // that LLVM implements.
- //
- // Because of this we never inline on Windows, but we do inline on
- // other platforms (where external references to thread locals
- // across DLLs are supported). A better fix for this would be to
- // inline this function on Windows, but only for "statically linked"
- // components. For example if two separately compiled rlibs end up
- // getting linked into a DLL then it's fine to inline this function
- // across that boundary. It's only not fine to inline this function
- // across a DLL boundary. Unfortunately rustc doesn't currently
- // have this sort of logic available in an attribute, and it's not
- // clear that rustc is even equipped to answer this (it's more of a
- // Cargo question kinda). This means that, unfortunately, Windows
- // gets the pessimistic path for now where it's never inlined.
- //
- // The issue of "should enable on Windows sometimes" is #84933
- #[cfg_attr(not(windows), inline)]
- unsafe fn __getit(
- init: $crate::option::Option<&mut $crate::option::Option<$t>>,
- ) -> $crate::option::Option<&'static $t> {
- #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
- static __KEY: $crate::thread::__StaticLocalKeyInner<$t> =
- $crate::thread::__StaticLocalKeyInner::new();
-
- #[thread_local]
- #[cfg(all(
- target_thread_local,
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
- ))]
- static __KEY: $crate::thread::__FastLocalKeyInner<$t> =
- $crate::thread::__FastLocalKeyInner::new();
-
- #[cfg(all(
- not(target_thread_local),
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
- ))]
- static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
- $crate::thread::__OsLocalKeyInner::new();
-
- // FIXME: remove the #[allow(...)] marker when macros don't
- // raise warning for missing/extraneous unsafe blocks anymore.
- // See https://github.com/rust-lang/rust/issues/74838.
- #[allow(unused_unsafe)]
- unsafe {
- __KEY.get(move || {
- if let $crate::option::Option::Some(init) = init {
- if let $crate::option::Option::Some(value) = init.take() {
- return value;
- } else if $crate::cfg!(debug_assertions) {
- $crate::unreachable!("missing default value");
- }
- }
- __init()
- })
- }
- }
-
- unsafe {
- $crate::thread::LocalKey::new(__getit)
- }
- }
- };
- ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $($init:tt)*) => {
- $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> =
- $crate::__thread_local_inner!(@key $t, $($init)*);
- }
-}
-
/// An error returned by [`LocalKey::try_with`](struct.LocalKey.html#method.try_with).
#[stable(feature = "thread_local_try_with", since = "1.26.0")]
#[non_exhaustive]
@@ -779,376 +585,3 @@ impl<T: 'static> LocalKey<RefCell<T>> {
self.with(|cell| cell.replace(value))
}
}
-
-mod lazy {
- use crate::cell::UnsafeCell;
- use crate::hint;
- use crate::mem;
-
- pub struct LazyKeyInner<T> {
- inner: UnsafeCell<Option<T>>,
- }
-
- impl<T> LazyKeyInner<T> {
- pub const fn new() -> LazyKeyInner<T> {
- LazyKeyInner { inner: UnsafeCell::new(None) }
- }
-
- pub unsafe fn get(&self) -> Option<&'static T> {
- // SAFETY: The caller must ensure no reference is ever handed out to
- // the inner cell nor mutable reference to the Option<T> inside said
- // cell. This make it safe to hand a reference, though the lifetime
- // of 'static is itself unsafe, making the get method unsafe.
- unsafe { (*self.inner.get()).as_ref() }
- }
-
- /// The caller must ensure that no reference is active: this method
- /// needs unique access.
- pub unsafe fn initialize<F: FnOnce() -> T>(&self, init: F) -> &'static T {
- // Execute the initialization up front, *then* move it into our slot,
- // just in case initialization fails.
- let value = init();
- let ptr = self.inner.get();
-
- // SAFETY:
- //
- // note that this can in theory just be `*ptr = Some(value)`, but due to
- // the compiler will currently codegen that pattern with something like:
- //
- // ptr::drop_in_place(ptr)
- // ptr::write(ptr, Some(value))
- //
- // Due to this pattern it's possible for the destructor of the value in
- // `ptr` (e.g., if this is being recursively initialized) to re-access
- // TLS, in which case there will be a `&` and `&mut` pointer to the same
- // value (an aliasing violation). To avoid setting the "I'm running a
- // destructor" flag we just use `mem::replace` which should sequence the
- // operations a little differently and make this safe to call.
- //
- // The precondition also ensures that we are the only one accessing
- // `self` at the moment so replacing is fine.
- unsafe {
- let _ = mem::replace(&mut *ptr, Some(value));
- }
-
- // SAFETY: With the call to `mem::replace` it is guaranteed there is
- // a `Some` behind `ptr`, not a `None` so `unreachable_unchecked`
- // will never be reached.
- unsafe {
- // After storing `Some` we want to get a reference to the contents of
- // what we just stored. While we could use `unwrap` here and it should
- // always work it empirically doesn't seem to always get optimized away,
- // which means that using something like `try_with` can pull in
- // panicking code and cause a large size bloat.
- match *ptr {
- Some(ref x) => x,
- None => hint::unreachable_unchecked(),
- }
- }
- }
-
- /// The other methods hand out references while taking &self.
- /// As such, callers of this method must ensure no `&` and `&mut` are
- /// available and used at the same time.
- #[allow(unused)]
- pub unsafe fn take(&mut self) -> Option<T> {
- // SAFETY: See doc comment for this method.
- unsafe { (*self.inner.get()).take() }
- }
- }
-}
-
-/// On some targets like wasm there's no threads, so no need to generate
-/// thread locals and we can instead just use plain statics!
-#[doc(hidden)]
-#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
-pub mod statik {
- use super::lazy::LazyKeyInner;
- use crate::fmt;
-
- pub struct Key<T> {
- inner: LazyKeyInner<T>,
- }
-
- unsafe impl<T> Sync for Key<T> {}
-
- impl<T> fmt::Debug for Key<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Key").finish_non_exhaustive()
- }
- }
-
- impl<T> Key<T> {
- pub const fn new() -> Key<T> {
- Key { inner: LazyKeyInner::new() }
- }
-
- pub unsafe fn get(&self, init: impl FnOnce() -> T) -> Option<&'static T> {
- // SAFETY: The caller must ensure no reference is ever handed out to
- // the inner cell nor mutable reference to the Option<T> inside said
- // cell. This make it safe to hand a reference, though the lifetime
- // of 'static is itself unsafe, making the get method unsafe.
- let value = unsafe {
- match self.inner.get() {
- Some(ref value) => value,
- None => self.inner.initialize(init),
- }
- };
-
- Some(value)
- }
- }
-}
-
-#[doc(hidden)]
-#[cfg(all(target_thread_local, not(all(target_family = "wasm", not(target_feature = "atomics"))),))]
-pub mod fast {
- use super::lazy::LazyKeyInner;
- use crate::cell::Cell;
- use crate::sys::thread_local_dtor::register_dtor;
- use crate::{fmt, mem, panic};
-
- #[derive(Copy, Clone)]
- enum DtorState {
- Unregistered,
- Registered,
- RunningOrHasRun,
- }
-
- // This data structure has been carefully constructed so that the fast path
- // only contains one branch on x86. That optimization is necessary to avoid
- // duplicated tls lookups on OSX.
- //
- // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
- pub struct Key<T> {
- // If `LazyKeyInner::get` returns `None`, that indicates either:
- // * The value has never been initialized
- // * The value is being recursively initialized
- // * The value has already been destroyed or is being destroyed
- // To determine which kind of `None`, check `dtor_state`.
- //
- // This is very optimizer friendly for the fast path - initialized but
- // not yet dropped.
- inner: LazyKeyInner<T>,
-
- // Metadata to keep track of the state of the destructor. Remember that
- // this variable is thread-local, not global.
- dtor_state: Cell<DtorState>,
- }
-
- impl<T> fmt::Debug for Key<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Key").finish_non_exhaustive()
- }
- }
-
- impl<T> Key<T> {
- pub const fn new() -> Key<T> {
- Key { inner: LazyKeyInner::new(), dtor_state: Cell::new(DtorState::Unregistered) }
- }
-
- // note that this is just a publicly-callable function only for the
- // const-initialized form of thread locals, basically a way to call the
- // free `register_dtor` function defined elsewhere in std.
- pub unsafe fn register_dtor(a: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
- unsafe {
- register_dtor(a, dtor);
- }
- }
-
- pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
- // SAFETY: See the definitions of `LazyKeyInner::get` and
- // `try_initialize` for more information.
- //
- // The caller must ensure no mutable references are ever active to
- // the inner cell or the inner T when this is called.
- // The `try_initialize` is dependant on the passed `init` function
- // for this.
- unsafe {
- match self.inner.get() {
- Some(val) => Some(val),
- None => self.try_initialize(init),
- }
- }
- }
-
- // `try_initialize` is only called once per fast thread local variable,
- // except in corner cases where thread_local dtors reference other
- // thread_local's, or it is being recursively initialized.
- //
- // Macos: Inlining this function can cause two `tlv_get_addr` calls to
- // be performed for every call to `Key::get`.
- // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
- #[inline(never)]
- unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
- // SAFETY: See comment above (this function doc).
- if !mem::needs_drop::<T>() || unsafe { self.try_register_dtor() } {
- // SAFETY: See comment above (this function doc).
- Some(unsafe { self.inner.initialize(init) })
- } else {
- None
- }
- }
-
- // `try_register_dtor` is only called once per fast thread local
- // variable, except in corner cases where thread_local dtors reference
- // other thread_local's, or it is being recursively initialized.
- unsafe fn try_register_dtor(&self) -> bool {
- match self.dtor_state.get() {
- DtorState::Unregistered => {
- // SAFETY: dtor registration happens before initialization.
- // Passing `self` as a pointer while using `destroy_value<T>`
- // is safe because the function will build a pointer to a
- // Key<T>, which is the type of self and so find the correct
- // size.
- unsafe { register_dtor(self as *const _ as *mut u8, destroy_value::<T>) };
- self.dtor_state.set(DtorState::Registered);
- true
- }
- DtorState::Registered => {
- // recursively initialized
- true
- }
- DtorState::RunningOrHasRun => false,
- }
- }
- }
-
- unsafe extern "C" fn destroy_value<T>(ptr: *mut u8) {
- let ptr = ptr as *mut Key<T>;
-
- // SAFETY:
- //
- // The pointer `ptr` has been built just above and comes from
- // `try_register_dtor` where it is originally a Key<T> coming from `self`,
- // making it non-NUL and of the correct type.
- //
- // Right before we run the user destructor be sure to set the
- // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
- // causes future calls to `get` to run `try_initialize_drop` again,
- // which will now fail, and return `None`.
- //
- // Wrap the call in a catch to ensure unwinding is caught in the event
- // a panic takes place in a destructor.
- if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| unsafe {
- let value = (*ptr).inner.take();
- (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
- drop(value);
- })) {
- rtabort!("thread local panicked on drop");
- }
- }
-}
-
-#[doc(hidden)]
-#[cfg(all(
- not(target_thread_local),
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
-))]
-pub mod os {
- use super::lazy::LazyKeyInner;
- use crate::cell::Cell;
- use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;
- use crate::{fmt, marker, panic, ptr};
-
- /// Use a regular global static to store this key; the state provided will then be
- /// thread-local.
- pub struct Key<T> {
- // OS-TLS key that we'll use to key off.
- os: OsStaticKey,
- marker: marker::PhantomData<Cell<T>>,
- }
-
- impl<T> fmt::Debug for Key<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Key").finish_non_exhaustive()
- }
- }
-
- unsafe impl<T> Sync for Key<T> {}
-
- struct Value<T: 'static> {
- inner: LazyKeyInner<T>,
- key: &'static Key<T>,
- }
-
- impl<T: 'static> Key<T> {
- #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
- pub const fn new() -> Key<T> {
- Key { os: OsStaticKey::new(Some(destroy_value::<T>)), marker: marker::PhantomData }
- }
-
- /// It is a requirement for the caller to ensure that no mutable
- /// reference is active when this method is called.
- pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
- // SAFETY: See the documentation for this method.
- let ptr = unsafe { self.os.get() as *mut Value<T> };
- if ptr.addr() > 1 {
- // SAFETY: the check ensured the pointer is safe (its destructor
- // is not running) + it is coming from a trusted source (self).
- if let Some(ref value) = unsafe { (*ptr).inner.get() } {
- return Some(value);
- }
- }
- // SAFETY: At this point we are sure we have no value and so
- // initializing (or trying to) is safe.
- unsafe { self.try_initialize(init) }
- }
-
- // `try_initialize` is only called once per os thread local variable,
- // except in corner cases where thread_local dtors reference other
- // thread_local's, or it is being recursively initialized.
- unsafe fn try_initialize(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
- // SAFETY: No mutable references are ever handed out meaning getting
- // the value is ok.
- let ptr = unsafe { self.os.get() as *mut Value<T> };
- if ptr.addr() == 1 {
- // destructor is running
- return None;
- }
-
- let ptr = if ptr.is_null() {
- // If the lookup returned null, we haven't initialized our own
- // local copy, so do that now.
- let ptr = Box::into_raw(Box::new(Value { inner: LazyKeyInner::new(), key: self }));
- // SAFETY: At this point we are sure there is no value inside
- // ptr so setting it will not affect anyone else.
- unsafe {
- self.os.set(ptr as *mut u8);
- }
- ptr
- } else {
- // recursive initialization
- ptr
- };
-
- // SAFETY: ptr has been ensured as non-NUL just above an so can be
- // dereferenced safely.
- unsafe { Some((*ptr).inner.initialize(init)) }
- }
- }
-
- unsafe extern "C" fn destroy_value<T: 'static>(ptr: *mut u8) {
- // SAFETY:
- //
- // The OS TLS ensures that this key contains a null value when this
- // destructor starts to run. We set it back to a sentinel value of 1 to
- // ensure that any future calls to `get` for this thread will return
- // `None`.
- //
- // Note that to prevent an infinite loop we reset it back to null right
- // before we return from the destructor ourselves.
- //
- // Wrap the call in a catch to ensure unwinding is caught in the event
- // a panic takes place in a destructor.
- if let Err(_) = panic::catch_unwind(|| unsafe {
- let ptr = Box::from_raw(ptr as *mut Value<T>);
- let key = ptr.key;
- key.os.set(ptr::invalid_mut(1));
- drop(ptr);
- key.os.set(ptr::null_mut());
- }) {
- rtabort!("thread local panicked on drop");
- }
- }
-}
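The deleted `lazy`, `statik`, `fast`, and `os` modules move to `sys::common::thread_local` (re-exported below as `__LocalKeyInner`). One subtlety from the removed comments is worth keeping in mind: `LazyKeyInner::initialize` avoided plain assignment because `*ptr = Some(value)` drops the old value in place first, and a destructor that re-enters TLS could then observe aliasing `&` and `&mut` references. A minimal reproduction of the pattern:

```rust
use std::cell::UnsafeCell;
use std::mem;

struct LazySlot<T>(UnsafeCell<Option<T>>);

impl<T> LazySlot<T> {
    /// Safety: the caller must guarantee that no other reference into
    /// the cell is alive for the duration of this call.
    unsafe fn initialize(&self, value: T) -> &T {
        let ptr = self.0.get();
        // Not `*ptr = Some(value)`: that would drop any old value in
        // place before writing, and a drop that re-enters TLS could
        // observe a `&mut` aliasing our reference. `mem::replace`
        // moves the old value out before it is dropped.
        let _ = mem::replace(&mut *ptr, Some(value));
        (*ptr).as_ref().unwrap_unchecked()
    }
}
```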
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 489af7767..13b845b25 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -131,7 +131,8 @@
//!
//! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`].
//! * Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack
-//! size (in bytes). Note that setting [`Builder::stack_size`] will override this.
+//! size (in bytes). Note that setting [`Builder::stack_size`] will override this. Be aware that
+//! changes to `RUST_MIN_STACK` may be ignored after program start.
//!
//! Note that the stack size of the main thread is *not* determined by Rust.
//!
@@ -203,44 +204,9 @@ pub use self::local::{AccessError, LocalKey};
// by the elf linker. "static" is for single-threaded platforms where a global
// static is sufficient.
-#[unstable(feature = "libstd_thread_internals", issue = "none")]
-#[cfg(not(test))]
-#[cfg(all(
- target_thread_local,
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
-))]
-#[doc(hidden)]
-pub use self::local::fast::Key as __FastLocalKeyInner;
-// when building for tests, use real std's type
-#[unstable(feature = "libstd_thread_internals", issue = "none")]
-#[cfg(test)]
-#[cfg(all(
- target_thread_local,
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
-))]
-pub use realstd::thread::__FastLocalKeyInner;
-
-#[unstable(feature = "libstd_thread_internals", issue = "none")]
-#[cfg(not(test))]
-#[cfg(all(
- not(target_thread_local),
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
-))]
#[doc(hidden)]
-pub use self::local::os::Key as __OsLocalKeyInner;
-// when building for tests, use real std's type
-#[unstable(feature = "libstd_thread_internals", issue = "none")]
-#[cfg(test)]
-#[cfg(all(
- not(target_thread_local),
- not(all(target_family = "wasm", not(target_feature = "atomics"))),
-))]
-pub use realstd::thread::__OsLocalKeyInner;
-
#[unstable(feature = "libstd_thread_internals", issue = "none")]
-#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
-#[doc(hidden)]
-pub use self::local::statik::Key as __StaticLocalKeyInner;
+pub use crate::sys::common::thread_local::Key as __LocalKeyInner;
////////////////////////////////////////////////////////////////////////////////
// Builder
diff --git a/library/stdarch/ci/docker/wasm32-wasi/Dockerfile b/library/stdarch/ci/docker/wasm32-wasi/Dockerfile
index 3e250f8b5..fff41a0eb 100644
--- a/library/stdarch/ci/docker/wasm32-wasi/Dockerfile
+++ b/library/stdarch/ci/docker/wasm32-wasi/Dockerfile
@@ -7,11 +7,10 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \
xz-utils \
clang
-RUN curl -L https://github.com/bytecodealliance/wasmtime/releases/download/v0.29.0/wasmtime-v0.29.0-x86_64-linux.tar.xz | tar xJf -
-ENV PATH=$PATH:/wasmtime-v0.29.0-x86_64-linux
+RUN curl -L https://github.com/bytecodealliance/wasmtime/releases/download/dev/wasmtime-dev-x86_64-linux.tar.xz | tar xJf -
+ENV PATH=$PATH:/wasmtime-dev-x86_64-linux
ENV CARGO_TARGET_WASM32_WASI_RUNNER="wasmtime \
- --enable-simd \
- --enable-threads \
+ --wasm-features=threads,relaxed-simd \
--mapdir .::/checkout/target/wasm32-wasi/release/deps \
--"
diff --git a/library/stdarch/crates/core_arch/src/aarch64/armclang.rs b/library/stdarch/crates/core_arch/src/aarch64/armclang.rs
index 7ad6ae50c..9a608702a 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/armclang.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/armclang.rs
@@ -18,6 +18,6 @@ use stdarch_test::assert_instr;
#[inline(always)]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __breakpoint<const VAL: i32>() {
- static_assert_imm16!(VAL);
+ static_assert_uimm_bits!(VAL, 16);
crate::arch::asm!("brk {}", const VAL);
}
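`static_assert_imm16!` and its width-suffixed siblings are consolidated into `static_assert_uimm_bits!`, which takes the bit width as a second argument. What the assertion enforces, written as a plain const function (the macro itself is internal to stdarch):

```rust
// VAL must be a non-negative value representable in `bits` unsigned
// bits, e.g. 0..=65535 for the 16-bit breakpoint immediate above.
const fn assert_uimm_bits(val: i32, bits: u32) {
    assert!(val >= 0 && (val as u64) < (1u64 << bits));
}

const _: () = assert_uimm_bits(0x1234, 16); // checked at compile time
```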
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
index ac05a0c23..cb5413fa3 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs
@@ -191,8 +191,8 @@ pub unsafe fn vabdd_f64(a: f64, b: f64) -> f64 {
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
- let c: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let d: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
simd_cast(vabd_u8(c, d))
}
@@ -204,8 +204,8 @@ pub unsafe fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
- let c: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let d: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
simd_cast(vabd_u16(c, d))
}
@@ -217,8 +217,8 @@ pub unsafe fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(uabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
- let c: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let d: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
simd_cast(vabd_u32(c, d))
}
@@ -230,8 +230,8 @@ pub unsafe fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(sabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
- let c: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let d: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let e: uint8x8_t = simd_cast(vabd_s8(c, d));
simd_cast(e)
}
@@ -244,8 +244,8 @@ pub unsafe fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(sabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- let c: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let d: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let e: uint16x4_t = simd_cast(vabd_s16(c, d));
simd_cast(e)
}
@@ -258,8 +258,8 @@ pub unsafe fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(sabdl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- let c: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let d: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
let e: uint32x2_t = simd_cast(vabd_s32(c, d));
simd_cast(e)
}
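Likewise, the width-suffixed shuffle macros (`simd_shuffle2!`, `simd_shuffle4!`, `simd_shuffle8!`, ...) collapse into a single `simd_shuffle!` that infers the output lane count from the length of the index array. Portable SIMD exposes the same idea on nightly; an analogue of the high-half extraction above:

```rust
#![feature(portable_simd)]
use std::simd::{simd_swizzle, u8x16, u8x8};

// Select lanes 8..16 of a 16-lane vector; the 8-element index array
// fixes the output type as an 8-lane vector.
fn high_half(v: u8x16) -> u8x8 {
    simd_swizzle!(v, [8, 9, 10, 11, 12, 13, 14, 15])
}
```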
@@ -2277,17 +2277,17 @@ pub unsafe fn vcaled_f64(a: f64, b: f64) -> u64 {
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm3!(LANE2);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 3);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
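
vcopy_lane_s8 replaces lane LANE1 of a with lane LANE2 of b; the index 8 + LANE2 selects from the second operand because simd_shuffle treats its two inputs as one concatenated 16-lane vector. A minimal usage sketch (assuming an aarch64 target, where NEON is in the baseline; values chosen for illustration):

    #[cfg(target_arch = "aarch64")]
    unsafe fn demo_copy_lane() {
        use core::arch::aarch64::*;
        let a = vdup_n_s8(1); // all 8 lanes set to 1
        let b = vdup_n_s8(9); // all 8 lanes set to 9
        // Copy lane 5 of `b` into lane 2 of `a`; other lanes keep `a`'s values.
        let r = vcopy_lane_s8::<2, 5>(a, b);
        assert_eq!(vget_lane_s8::<2>(r), 9);
        assert_eq!(vget_lane_s8::<0>(r), 1);
    }
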
@@ -2301,25 +2301,25 @@ pub unsafe fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b:
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert_imm4!(LANE1);
- static_assert_imm4!(LANE2);
+ static_assert_uimm_bits!(LANE1, 4);
+ static_assert_uimm_bits!(LANE2, 4);
match LANE1 & 0b1111 {
- 0 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 2 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 3 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 4 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 5 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 6 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 7 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
- 8 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
- 9 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
- 10 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
- 11 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
- 12 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
- 13 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
- 14 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
- 15 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
+ 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
+ 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
+ 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
+ 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
+ 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
+ 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
+ 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
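
The 16-way match above exists because a shuffle's index array must be a compile-time constant: LANE2 may appear inside an index expression (16 + LANE2 as u32), but LANE1 selects which position of the array is replaced, and that position cannot be computed inside the array literal. For readers tracing the expanded cases, the table each arm spells out is equivalent to this hypothetical const helper (not part of stdarch):

    // Builds the identity indices 0..16 and redirects position LANE1 to lane
    // LANE2 of the second operand (offset by 16 in the concatenated view).
    const fn copyq_indices<const LANE1: usize, const LANE2: u32>() -> [u32; 16] {
        let mut idx = [0u32; 16];
        let mut i = 0;
        while i < 16 {
            idx[i] = i as u32;
            i += 1;
        }
        idx[LANE1] = 16 + LANE2;
        idx
    }
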
@@ -2333,13 +2333,13 @@ pub unsafe fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm2!(LANE2);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 2);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2353,17 +2353,17 @@ pub unsafe fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm3!(LANE2);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 3);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2377,11 +2377,11 @@ pub unsafe fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2395,13 +2395,13 @@ pub unsafe fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm2!(LANE2);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 2);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2415,11 +2415,11 @@ pub unsafe fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2433,17 +2433,17 @@ pub unsafe fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm3!(LANE2);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 3);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2457,25 +2457,25 @@ pub unsafe fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b:
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert_imm4!(LANE1);
- static_assert_imm4!(LANE2);
+ static_assert_uimm_bits!(LANE1, 4);
+ static_assert_uimm_bits!(LANE2, 4);
match LANE1 & 0b1111 {
- 0 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 2 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 3 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 4 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 5 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 6 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 7 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
- 8 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
- 9 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
- 10 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
- 11 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
- 12 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
- 13 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
- 14 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
- 15 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
+ 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
+ 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
+ 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
+ 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
+ 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
+ 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
+ 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2489,13 +2489,13 @@ pub unsafe fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm2!(LANE2);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 2);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2509,17 +2509,17 @@ pub unsafe fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm3!(LANE2);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 3);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2533,11 +2533,11 @@ pub unsafe fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2551,13 +2551,13 @@ pub unsafe fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm2!(LANE2);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 2);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2571,11 +2571,11 @@ pub unsafe fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2589,17 +2589,17 @@ pub unsafe fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm3!(LANE2);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 3);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2613,25 +2613,25 @@ pub unsafe fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b:
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- static_assert_imm4!(LANE1);
- static_assert_imm4!(LANE2);
+ static_assert_uimm_bits!(LANE1, 4);
+ static_assert_uimm_bits!(LANE2, 4);
match LANE1 & 0b1111 {
- 0 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 2 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 3 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 4 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 5 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 6 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 7 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
- 8 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
- 9 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
- 10 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
- 11 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
- 12 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
- 13 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
- 14 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
- 15 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
+ 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
+ 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
+ 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
+ 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
+ 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
+ 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
+ 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2645,13 +2645,13 @@ pub unsafe fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm2!(LANE2);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 2);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2665,17 +2665,17 @@ pub unsafe fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm3!(LANE2);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 3);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2689,11 +2689,11 @@ pub unsafe fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2707,11 +2707,11 @@ pub unsafe fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2725,13 +2725,13 @@ pub unsafe fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm2!(LANE2);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 2);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2745,11 +2745,11 @@ pub unsafe fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm1!(LANE2);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 1);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
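
From here the mixed-width variants begin: vcopy_laneq_* takes a 64-bit d-register destination and a 128-bit q-register source. Because simd_shuffle requires both operands to have the same lane count, a is first doubled up to the full q width with an identity-pattern shuffle over [a, a]; the final shuffle then both performs the copy and produces the narrow result directly. A usage sketch (aarch64, illustrative values):

    #[cfg(target_arch = "aarch64")]
    unsafe fn demo_copy_laneq() {
        use core::arch::aarch64::*;
        let a = vdup_n_u32(7);   // 2-lane destination
        let b = vdupq_n_u32(42); // 4-lane source
        // Replace lane 1 of `a` with lane 3 of `b`.
        let r = vcopy_laneq_u32::<1, 3>(a, b);
        assert_eq!(vget_lane_u32::<1>(r), 42);
        assert_eq!(vget_lane_u32::<0>(r), 7);
    }
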
@@ -2763,18 +2763,18 @@ pub unsafe fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm4!(LANE2);
- let a: int8x16_t = simd_shuffle16!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 4);
+ let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2788,14 +2788,14 @@ pub unsafe fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b:
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm3!(LANE2);
- let a: int16x8_t = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 3);
+ let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2809,12 +2809,12 @@ pub unsafe fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm2!(LANE2);
- let a: int32x4_t = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 2);
+ let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2828,18 +2828,18 @@ pub unsafe fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm4!(LANE2);
- let a: uint8x16_t = simd_shuffle16!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 4);
+ let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2853,14 +2853,14 @@ pub unsafe fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm3!(LANE2);
- let a: uint16x8_t = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 3);
+ let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2874,12 +2874,12 @@ pub unsafe fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(a: uint16x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm2!(LANE2);
- let a: uint32x4_t = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 2);
+ let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2893,18 +2893,18 @@ pub unsafe fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(a: uint32x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x16_t) -> poly8x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm4!(LANE2);
- let a: poly8x16_t = simd_shuffle16!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 4);
+ let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2918,14 +2918,14 @@ pub unsafe fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t, b: poly16x8_t) -> poly16x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm3!(LANE2);
- let a: poly16x8_t = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 3);
+ let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2939,12 +2939,12 @@ pub unsafe fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(a: poly16x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
- static_assert_imm1!(LANE1);
- static_assert_imm2!(LANE2);
- let a: float32x4_t = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert_uimm_bits!(LANE2, 2);
+ let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
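
The vcopyq_lane_* variants below are the converse case: a 128-bit destination and a 64-bit source, so it is b that gets widened before the copy. Usage mirrors the other variants (aarch64, illustrative values):

    #[cfg(target_arch = "aarch64")]
    unsafe fn demo_copyq_lane() {
        use core::arch::aarch64::*;
        let a = vdupq_n_s32(0); // 4-lane destination
        let b = vdup_n_s32(-5); // 2-lane source
        // Replace lane 2 of `a` with lane 1 of `b`.
        let r = vcopyq_lane_s32::<2, 1>(a, b);
        assert_eq!(vgetq_lane_s32::<2>(r), -5);
    }
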
@@ -2958,26 +2958,26 @@ pub unsafe fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(a: float32x2_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
- static_assert_imm4!(LANE1);
- static_assert_imm3!(LANE2);
- let b: int8x16_t = simd_shuffle16!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert_uimm_bits!(LANE1, 4);
+ static_assert_uimm_bits!(LANE2, 3);
+ let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
match LANE1 & 0b1111 {
- 0 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 2 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 3 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 4 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 5 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 6 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 7 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
- 8 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
- 9 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
- 10 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
- 11 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
- 12 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
- 13 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
- 14 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
- 15 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
+ 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
+ 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
+ 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
+ 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
+ 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
+ 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
+ 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -2991,18 +2991,18 @@ pub unsafe fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm2!(LANE2);
- let b: int16x8_t = simd_shuffle8!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 2);
+ let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3016,14 +3016,14 @@ pub unsafe fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x8_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm1!(LANE2);
- let b: int32x4_t = simd_shuffle4!(b, b, [0, 1, 2, 3]);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 1);
+ let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3037,26 +3037,26 @@ pub unsafe fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
- static_assert_imm4!(LANE1);
- static_assert_imm3!(LANE2);
- let b: uint8x16_t = simd_shuffle16!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert_uimm_bits!(LANE1, 4);
+ static_assert_uimm_bits!(LANE2, 3);
+ let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
match LANE1 & 0b1111 {
- 0 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 2 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 3 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 4 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 5 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 6 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 7 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
- 8 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
- 9 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
- 10 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
- 11 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
- 12 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
- 13 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
- 14 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
- 15 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
+ 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
+ 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
+ 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
+ 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
+ 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
+ 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
+ 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
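
The `match LANE1 & 0b1111` shape is what keeps the `_ => unreachable_unchecked()` arm sound: the mask confines the scrutinee to 0..=15, every one of those values has an explicit arm, and the unreachable hint lets the optimizer fold the whole match into a single constant shuffle. A standalone sketch of the same idiom (illustrative, not library code):

    use core::hint::unreachable_unchecked;

    // After masking, every reachable value has an arm; the wildcard only
    // satisfies the exhaustiveness check and is genuinely dead.
    unsafe fn masked_match(n: i32) -> u32 {
        match n & 0b1111 {
            v @ 0..=15 => v as u32,
            _ => unreachable_unchecked(),
        }
    }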
@@ -3070,18 +3070,18 @@ pub unsafe fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x16_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm2!(LANE2);
- let b: uint16x8_t = simd_shuffle8!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 2);
+ let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3095,14 +3095,14 @@ pub unsafe fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(a: uint16x8_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm1!(LANE2);
- let b: uint32x4_t = simd_shuffle4!(b, b, [0, 1, 2, 3]);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 1);
+ let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3116,26 +3116,26 @@ pub unsafe fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(a: uint32x4_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t, b: poly8x8_t) -> poly8x16_t {
- static_assert_imm4!(LANE1);
- static_assert_imm3!(LANE2);
- let b: poly8x16_t = simd_shuffle16!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert_uimm_bits!(LANE1, 4);
+ static_assert_uimm_bits!(LANE2, 3);
+ let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
match LANE1 & 0b1111 {
- 0 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 2 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 3 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 4 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 5 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 6 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 7 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
- 8 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
- 9 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
- 10 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
- 11 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
- 12 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
- 13 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
- 14 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
- 15 => simd_shuffle16!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]),
+ 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]),
+ 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]),
+ 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]),
+ 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]),
+ 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]),
+ 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]),
+ 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3149,18 +3149,18 @@ pub unsafe fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x16_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t, b: poly16x4_t) -> poly16x8_t {
- static_assert_imm3!(LANE1);
- static_assert_imm2!(LANE2);
- let b: poly16x8_t = simd_shuffle8!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
+ static_assert_uimm_bits!(LANE1, 3);
+ static_assert_uimm_bits!(LANE2, 2);
+ let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);
match LANE1 & 0b111 {
- 0 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
- 2 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
- 3 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
- 4 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
- 5 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
- 6 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
- 7 => simd_shuffle8!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
+ 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
+ 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
+ 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3174,12 +3174,12 @@ pub unsafe fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(a: poly16x8_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t, b: int64x1_t) -> int64x2_t {
- static_assert_imm1!(LANE1);
- static_assert!(LANE2 : i32 where LANE2 == 0);
- let b: int64x2_t = simd_shuffle2!(b, b, [0, 1]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert!(LANE2 == 0);
+ let b: int64x2_t = simd_shuffle!(b, b, [0, 1]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
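
For one-lane sources such as int64x1_t there is no lane to choose, so the old where-clause form `static_assert!(LANE2 : i32 where LANE2 == 0)` becomes the plain boolean `static_assert!(LANE2 == 0)`. A hypothetical stand-in with the same effect, assuming the macro is a thin wrapper over a const assertion (not the actual stdarch definition):

    // Assumed shape: reject the build unless the const predicate holds.
    macro_rules! const_assert {
        ($cond:expr) => {
            const _: () = assert!($cond);
        };
    }
    const_assert!(0 == 0); // e.g. LANE2 must be 0 for a one-lane vector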
@@ -3193,12 +3193,12 @@ pub unsafe fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(a: int64x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t, b: uint64x1_t) -> uint64x2_t {
- static_assert_imm1!(LANE1);
- static_assert!(LANE2 : i32 where LANE2 == 0);
- let b: uint64x2_t = simd_shuffle2!(b, b, [0, 1]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert!(LANE2 == 0);
+ let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3212,12 +3212,12 @@ pub unsafe fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(a: uint64x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t, b: poly64x1_t) -> poly64x2_t {
- static_assert_imm1!(LANE1);
- static_assert!(LANE2 : i32 where LANE2 == 0);
- let b: poly64x2_t = simd_shuffle2!(b, b, [0, 1]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert!(LANE2 == 0);
+ let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3231,14 +3231,14 @@ pub unsafe fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(a: poly64x2_t,
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
- static_assert_imm2!(LANE1);
- static_assert_imm1!(LANE2);
- let b: float32x4_t = simd_shuffle4!(b, b, [0, 1, 2, 3]);
+ static_assert_uimm_bits!(LANE1, 2);
+ static_assert_uimm_bits!(LANE2, 1);
+ let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);
match LANE1 & 0b11 {
- 0 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [4 + LANE2 as u32, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 4 + LANE2 as u32, 2, 3]),
- 2 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 4 + LANE2 as u32, 3]),
- 3 => simd_shuffle4!(a, b, <const LANE1: i32, const LANE2: i32> [0, 1, 2, 4 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
+ 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
+ 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3252,12 +3252,12 @@ pub unsafe fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(a: float32x4_t
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
- static_assert_imm1!(LANE1);
- static_assert!(LANE2 : i32 where LANE2 == 0);
- let b: float64x2_t = simd_shuffle2!(b, b, [0, 1]);
+ static_assert_uimm_bits!(LANE1, 1);
+ static_assert!(LANE2 == 0);
+ let b: float64x2_t = simd_shuffle!(b, b, [0, 1]);
match LANE1 & 0b1 {
- 0 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [2 + LANE2 as u32, 1]),
- 1 => simd_shuffle2!(a, b, <const LANE1: i32, const LANE2: i32> [0, 2 + LANE2 as u32]),
+ 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
+ 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
_ => unreachable_unchecked(),
}
}
@@ -3336,7 +3336,7 @@ pub unsafe fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
- let b: float32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
simd_cast(b)
}
@@ -3359,7 +3359,7 @@ pub unsafe fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
- simd_shuffle4!(a, simd_cast(b), [0, 1, 2, 3])
+ simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3])
}
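
Both high-half conversions are shuffle-plus-cast compositions: vcvt_high_f64_f32 selects lanes [2, 3] and widens them, while vcvt_high_f32_f64 keeps `a` as the low half and appends the narrowed `b`. As scalar models over plain arrays (illustrative, hypothetical names):

    fn vcvt_high_f64_f32_model(a: [f32; 4]) -> [f64; 2] {
        [a[2] as f64, a[3] as f64] // shuffle [2, 3] picks the high half, cast widens
    }

    fn vcvt_high_f32_f64_model(a: [f32; 2], b: [f64; 2]) -> [f32; 4] {
        [a[0], a[1], b[0] as f32, b[1] as f32] // shuffle [0, 1, 2, 3] over (a, cast(b))
    }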
/// Floating-point convert to lower precision narrow, rounding to odd
@@ -3397,7 +3397,7 @@ pub unsafe fn vcvtxd_f32_f64(a: f64) -> f32 {
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
- simd_shuffle4!(a, vcvtx_f32_f64(b), [0, 1, 2, 3])
+ simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3])
}
/// Fixed-point convert to floating-point
@@ -3409,7 +3409,7 @@ pub unsafe fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64")]
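
All of the vcvt*_n_* fixed-point conversions below share the `static_assert!(N >= 1 && N <= 64)` bound because N counts fractional bits. A scalar model of the signed 64-bit case, assuming std's f64::powi is available (the real intrinsic goes through the LLVM builtin declared above):

    // Hypothetical model: interpret `a` as fixed-point with `n` fractional bits.
    fn vcvt_n_f64_s64_model(a: i64, n: i32) -> f64 {
        (a as f64) / 2f64.powi(n) // divide by 2^N
    }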
@@ -3427,7 +3427,7 @@ pub unsafe fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64")]
@@ -3445,7 +3445,7 @@ pub unsafe fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32")]
@@ -3463,7 +3463,7 @@ pub unsafe fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64")]
@@ -3481,7 +3481,7 @@ pub unsafe fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64")]
@@ -3499,7 +3499,7 @@ pub unsafe fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64")]
@@ -3517,7 +3517,7 @@ pub unsafe fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32")]
@@ -3535,7 +3535,7 @@ pub unsafe fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64")]
@@ -3553,7 +3553,7 @@ pub unsafe fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64")]
@@ -3571,7 +3571,7 @@ pub unsafe fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64")]
@@ -3589,7 +3589,7 @@ pub unsafe fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32")]
@@ -3607,7 +3607,7 @@ pub unsafe fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64")]
@@ -3625,7 +3625,7 @@ pub unsafe fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64")]
@@ -3643,7 +3643,7 @@ pub unsafe fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64")]
@@ -3661,7 +3661,7 @@ pub unsafe fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32")]
@@ -3679,7 +3679,7 @@ pub unsafe fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64")]
@@ -4617,8 +4617,8 @@ pub unsafe fn vcvtpd_u64_f64(a: f64) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
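
The vdupq_lane* family is broadcast-by-shuffle: `[N as u32, N as u32]` replicates one source lane across the result. A scalar model (illustrative):

    fn vdupq_laneq_model<const N: usize>(a: [u64; 2]) -> [u64; 2] {
        [a[N], a[N]] // simd_shuffle!(a, a, [N, N]) says the same thing
    }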
/// Set all vector lanes to the same value
@@ -4630,8 +4630,8 @@ pub unsafe fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
- static_assert!(N : i32 where N == 0);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert!(N == 0);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -4643,8 +4643,8 @@ pub unsafe fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -4656,8 +4656,8 @@ pub unsafe fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
- static_assert!(N : i32 where N == 0);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert!(N == 0);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -4669,7 +4669,7 @@ pub unsafe fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -4682,7 +4682,7 @@ pub unsafe fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -4695,7 +4695,7 @@ pub unsafe fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
transmute::<u64, _>(simd_extract(a, N as u32))
}
@@ -4708,7 +4708,7 @@ pub unsafe fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
transmute::<f64, _>(simd_extract(a, N as u32))
}
@@ -4721,7 +4721,7 @@ pub unsafe fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(a, N as u32)
}
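
From here on, the scalar vdup* variants are single-lane reads via simd_extract, and each `static_assert_uimm_bits!(N, k)` pins N to the k-bit range matching the lane count (3 bits for 8 lanes, 4 bits for 16, and so on). The contract as a plain-Rust analogue (assumed semantics, not the macro itself):

    // N must be a non-negative constant representable in `bits` unsigned bits.
    const fn fits_uimm(n: i32, bits: u32) -> bool {
        n >= 0 && (n as u64) < (1u64 << bits) // bits = 3 => 0..=7
    }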
@@ -4734,7 +4734,7 @@ pub unsafe fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_extract(a, N as u32)
}
@@ -4747,7 +4747,7 @@ pub unsafe fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
simd_extract(a, N as u32)
}
@@ -4760,7 +4760,7 @@ pub unsafe fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(a, N as u32)
}
@@ -4773,7 +4773,7 @@ pub unsafe fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
simd_extract(a, N as u32)
}
@@ -4786,7 +4786,7 @@ pub unsafe fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
simd_extract(a, N as u32)
}
@@ -4799,7 +4799,7 @@ pub unsafe fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
simd_extract(a, N as u32)
}
@@ -4812,7 +4812,7 @@ pub unsafe fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
simd_extract(a, N as u32)
}
@@ -4825,7 +4825,7 @@ pub unsafe fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(a, N as u32)
}
@@ -4838,7 +4838,7 @@ pub unsafe fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_extract(a, N as u32)
}
@@ -4851,7 +4851,7 @@ pub unsafe fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
simd_extract(a, N as u32)
}
@@ -4864,7 +4864,7 @@ pub unsafe fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(a, N as u32)
}
@@ -4877,7 +4877,7 @@ pub unsafe fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
simd_extract(a, N as u32)
}
@@ -4890,7 +4890,7 @@ pub unsafe fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
simd_extract(a, N as u32)
}
@@ -4903,7 +4903,7 @@ pub unsafe fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
simd_extract(a, N as u32)
}
@@ -4916,7 +4916,7 @@ pub unsafe fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
simd_extract(a, N as u32)
}
@@ -4929,7 +4929,7 @@ pub unsafe fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(a, N as u32)
}
@@ -4942,7 +4942,7 @@ pub unsafe fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_extract(a, N as u32)
}
@@ -4955,7 +4955,7 @@ pub unsafe fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
simd_extract(a, N as u32)
}
@@ -4968,7 +4968,7 @@ pub unsafe fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(a, N as u32)
}
@@ -4981,7 +4981,7 @@ pub unsafe fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
simd_extract(a, N as u32)
}
@@ -4994,7 +4994,7 @@ pub unsafe fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
simd_extract(a, N as u32)
}
@@ -5007,7 +5007,7 @@ pub unsafe fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
simd_extract(a, N as u32)
}
@@ -5020,7 +5020,7 @@ pub unsafe fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
simd_extract(a, N as u32)
}
@@ -5033,10 +5033,10 @@ pub unsafe fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
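
vext takes a window from the concatenation of its operands, which is why the two-lane arms read [0, 1] and [1, 2]. Scalar model (illustrative):

    // Two consecutive lanes starting at offset N within [a0, a1, b0, b1].
    fn vextq_model<const N: usize>(a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
        let cat = [a[0], a[1], b[0], b[1]];
        [cat[N], cat[N + 1]]
    }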
@@ -5050,10 +5050,10 @@ pub unsafe fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
@@ -5088,8 +5088,8 @@ pub unsafe fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
- let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
- let c: int8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
vmlal_s8(a, b, c)
}
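
The vmlal_high_* bodies select the upper halves of `b` and `c` with a shuffle and then defer to the non-high widening multiply-accumulate. A scalar model of the s8 case (illustrative, wrapping arithmetic to sidestep debug overflow checks):

    fn vmlal_high_s8_model(a: [i16; 8], b: [i8; 16], c: [i8; 16]) -> [i16; 8] {
        let mut r = a;
        for i in 0..8 {
            // widen lanes 8..16 to i16, multiply, accumulate
            r[i] = r[i].wrapping_add((b[8 + i] as i16) * (c[8 + i] as i16));
        }
        r
    }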
@@ -5101,8 +5101,8 @@ pub unsafe fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
- let c: int16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
+ let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
vmlal_s16(a, b, c)
}
@@ -5114,8 +5114,8 @@ pub unsafe fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
- let c: int32x2_t = simd_shuffle2!(c, c, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
+ let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
vmlal_s32(a, b, c)
}
@@ -5127,8 +5127,8 @@ pub unsafe fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
- let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
- let c: uint8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
vmlal_u8(a, b, c)
}
@@ -5140,8 +5140,8 @@ pub unsafe fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
- let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
- let c: uint16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]);
+ let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
+ let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
vmlal_u16(a, b, c)
}
@@ -5153,8 +5153,8 @@ pub unsafe fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
- let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
- let c: uint32x2_t = simd_shuffle2!(c, c, [2, 3]);
+ let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
+ let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
vmlal_u32(a, b, c)
}
@@ -5211,8 +5211,8 @@ pub unsafe fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmlal_high_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
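
The lane/laneq variants below differ only in how `c` is prepared: one lane is broadcast to full width before the vmlal_high_* call, with the LANE bound checked against the source's lane count (2 bits for a 4-lane `c`, 3 bits for 8 lanes). The broadcast as a scalar sketch (illustrative):

    fn broadcast_lane<const LANE: usize>(c: [i16; 4]) -> [i16; 8] {
        [c[LANE]; 8] // the eight-copies-of-LANE shuffle index list does the same
    }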
/// Multiply-add long
@@ -5224,8 +5224,8 @@ pub unsafe fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(LANE);
- vmlal_high_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlal_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-add long
@@ -5237,8 +5237,8 @@ pub unsafe fn vmlal_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
- vmlal_high_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-add long
@@ -5250,8 +5250,8 @@ pub unsafe fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(LANE);
- vmlal_high_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-add long
@@ -5263,8 +5263,8 @@ pub unsafe fn vmlal_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmlal_high_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-add long
@@ -5276,8 +5276,8 @@ pub unsafe fn vmlal_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
- static_assert_imm3!(LANE);
- vmlal_high_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlal_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-add long
@@ -5289,8 +5289,8 @@ pub unsafe fn vmlal_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
- vmlal_high_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-add long
@@ -5302,8 +5302,8 @@ pub unsafe fn vmlal_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlal_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
- static_assert_imm2!(LANE);
- vmlal_high_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply-subtract from accumulator
@@ -5336,8 +5336,8 @@ pub unsafe fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
- let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
- let c: int8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
vmlsl_s8(a, b, c)
}
@@ -5349,8 +5349,8 @@ pub unsafe fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
- let c: int16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
+ let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
vmlsl_s16(a, b, c)
}
@@ -5362,8 +5362,8 @@ pub unsafe fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
- let c: int32x2_t = simd_shuffle2!(c, c, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
+ let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
vmlsl_s32(a, b, c)
}
@@ -5375,8 +5375,8 @@ pub unsafe fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
- let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
- let c: uint8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
vmlsl_u8(a, b, c)
}
@@ -5388,8 +5388,8 @@ pub unsafe fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
- let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
- let c: uint16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]);
+ let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
+ let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
vmlsl_u16(a, b, c)
}
@@ -5401,8 +5401,8 @@ pub unsafe fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
- let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
- let c: uint32x2_t = simd_shuffle2!(c, c, [2, 3]);
+ let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
+ let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
vmlsl_u32(a, b, c)
}
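
vmlsl_high_* is the same high-half selection with the accumulate flipped to a subtract. Scalar model of the s16 case (illustrative, wrapping arithmetic):

    fn vmlsl_high_s16_model(a: [i32; 4], b: [i16; 8], c: [i16; 8]) -> [i32; 4] {
        let mut r = a;
        for i in 0..4 {
            r[i] = r[i].wrapping_sub((b[4 + i] as i32) * (c[4 + i] as i32));
        }
        r
    }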
@@ -5459,8 +5459,8 @@ pub unsafe fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmlsl_high_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5472,8 +5472,8 @@ pub unsafe fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(LANE);
- vmlsl_high_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlsl_high_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5485,8 +5485,8 @@ pub unsafe fn vmlsl_high_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
- vmlsl_high_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5498,8 +5498,8 @@ pub unsafe fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(LANE);
- vmlsl_high_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_high_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5511,8 +5511,8 @@ pub unsafe fn vmlsl_high_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmlsl_high_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5524,8 +5524,8 @@ pub unsafe fn vmlsl_high_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
- static_assert_imm3!(LANE);
- vmlsl_high_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlsl_high_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5537,8 +5537,8 @@ pub unsafe fn vmlsl_high_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x8_t
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
- vmlsl_high_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply-subtract long
@@ -5550,8 +5550,8 @@ pub unsafe fn vmlsl_high_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t,
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmlsl_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
- static_assert_imm2!(LANE);
- vmlsl_high_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_high_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Extract narrow
@@ -5563,7 +5563,7 @@ pub unsafe fn vmlsl_high_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x4_t
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
let c: int8x8_t = simd_cast(b);
- simd_shuffle16!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
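
vmovn_high_* narrows `b` with a truncating cast and packs it above `a`; the [0..16] shuffle is plain concatenation. Scalar model (illustrative):

    fn vmovn_high_s16_model(a: [i8; 8], b: [i16; 8]) -> [i8; 16] {
        let mut r = [0i8; 16];
        r[..8].copy_from_slice(&a);
        for i in 0..8 {
            r[8 + i] = b[i] as i8; // plain truncation, not saturation
        }
        r
    }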
/// Extract narrow
@@ -5575,7 +5575,7 @@ pub unsafe fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
let c: int16x4_t = simd_cast(b);
- simd_shuffle8!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Extract narrow
@@ -5587,7 +5587,7 @@ pub unsafe fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
let c: int32x2_t = simd_cast(b);
- simd_shuffle4!(a, c, [0, 1, 2, 3])
+ simd_shuffle!(a, c, [0, 1, 2, 3])
}
/// Extract narrow
@@ -5599,7 +5599,7 @@ pub unsafe fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
let c: uint8x8_t = simd_cast(b);
- simd_shuffle16!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Extract narrow
@@ -5611,7 +5611,7 @@ pub unsafe fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
let c: uint16x4_t = simd_cast(b);
- simd_shuffle8!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Extract narrow
@@ -5623,7 +5623,7 @@ pub unsafe fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
let c: uint32x2_t = simd_cast(b);
- simd_shuffle4!(a, c, [0, 1, 2, 3])
+ simd_shuffle!(a, c, [0, 1, 2, 3])
}
/// Negate
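
The extract-narrow hunks show the second half of the cleanup: the lane-count-suffixed shuffle macros (simd_shuffle2!/4!/8!/16!) collapse into one simd_shuffle! that infers the result width from the length of the index array, and the redundant <const LANE: i32> forwarding inside macro calls is dropped. As a scalar model of what vmovn_high_s16 computes (an illustration, not stdarch code): simd_cast truncates each 16-bit lane of b, and the 16-index shuffle concatenates a with the narrowed half.

// Scalar model of vmovn_high_s16; the real code operates on SIMD registers.
fn vmovn_high_s16_model(a: [i8; 8], b: [i16; 8]) -> [i8; 16] {
    let mut out = [0i8; 16];
    out[..8].copy_from_slice(&a); // shuffle indices 0..8 select lanes of `a`
    for (dst, &wide) in out[8..].iter_mut().zip(b.iter()) {
        *dst = wide as i8; // simd_cast: truncate each lane from 16 to 8 bits
    }
    out // shuffle indices 8..16 select lanes of the narrowed `c`
}
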
@@ -6742,7 +6742,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0i8")]
@@ -6760,7 +6760,7 @@ pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> in
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0i8")]
@@ -6778,7 +6778,7 @@ pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> i
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0i8")]
@@ -6796,7 +6796,7 @@ pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -6809,7 +6809,7 @@ pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -6822,7 +6822,7 @@ pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -6835,7 +6835,7 @@ pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -6848,7 +6848,7 @@ pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -6861,7 +6861,7 @@ pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -6874,7 +6874,7 @@ pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> p
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0i8")]
@@ -6892,7 +6892,7 @@ pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0i8")]
@@ -7050,7 +7050,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0i8")]
@@ -7068,7 +7068,7 @@ pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> in
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0i8")]
@@ -7086,7 +7086,7 @@ pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> i
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0i8")]
@@ -7104,7 +7104,7 @@ pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7117,7 +7117,7 @@ pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7130,7 +7130,7 @@ pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -7143,7 +7143,7 @@ pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> p
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -7156,7 +7156,7 @@ pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7169,7 +7169,7 @@ pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7182,7 +7182,7 @@ pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0i8")]
@@ -7200,7 +7200,7 @@ pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0i8")]
@@ -7358,7 +7358,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0i8")]
@@ -7376,7 +7376,7 @@ pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> in
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0i8")]
@@ -7394,7 +7394,7 @@ pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> i
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0i8")]
@@ -7412,7 +7412,7 @@ pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7425,7 +7425,7 @@ pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7438,7 +7438,7 @@ pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -7451,7 +7451,7 @@ pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> p
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -7464,7 +7464,7 @@ pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7477,7 +7477,7 @@ pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7490,7 +7490,7 @@ pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0i8")]
@@ -7508,7 +7508,7 @@ pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0i8")]
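
The vld2/vld3/vld4 lane loads above keep their compile-time lane validation; only the spelling changes, including the bespoke `LANE : i32 where LANE == 0` form becoming the plain boolean static_assert!(LANE == 0). A hypothetical caller sketch (the wrapper name and its implied safety contract are illustrations, not stdarch API):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn load_lane3(ptr: *const i8, regs: core::arch::aarch64::int8x16x2_t)
    -> core::arch::aarch64::int8x16x2_t
{
    use core::arch::aarch64::vld2q_lane_s8;
    // LANE = 3 satisfies static_assert_uimm_bits!(LANE, 4): 3 < 2^4 = 16 lanes.
    // LANE = 16 would fail to compile, exactly as static_assert_imm4! did.
    vld2q_lane_s8::<3>(ptr, regs)
}
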
@@ -7526,7 +7526,7 @@ pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
*a = simd_extract(b, LANE as u32);
}
@@ -7539,7 +7539,7 @@ pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -7718,7 +7718,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v16i8.p0i8")]
@@ -7736,7 +7736,7 @@ pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v1i64.p0i8")]
@@ -7754,7 +7754,7 @@ pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2i64.p0i8")]
@@ -7772,7 +7772,7 @@ pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vst2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -7785,7 +7785,7 @@ pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vst2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7798,7 +7798,7 @@ pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7811,7 +7811,7 @@ pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vst2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -7824,7 +7824,7 @@ pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vst2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7837,7 +7837,7 @@ pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -7850,7 +7850,7 @@ pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v1f64.p0i8")]
@@ -7868,7 +7868,7 @@ pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2f64.p0i8")]
@@ -7956,7 +7956,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v16i8.p0i8")]
@@ -7974,7 +7974,7 @@ pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v1i64.p0i8")]
@@ -7992,7 +7992,7 @@ pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2i64.p0i8")]
@@ -8010,7 +8010,7 @@ pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vst3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -8023,7 +8023,7 @@ pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vst3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8036,7 +8036,7 @@ pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8049,7 +8049,7 @@ pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vst3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -8062,7 +8062,7 @@ pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vst3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8075,7 +8075,7 @@ pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8088,7 +8088,7 @@ pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v1f64.p0i8")]
@@ -8106,7 +8106,7 @@ pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2f64.p0i8")]
@@ -8194,7 +8194,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v16i8.p0i8")]
@@ -8212,7 +8212,7 @@ pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v1i64.p0i8")]
@@ -8230,7 +8230,7 @@ pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2i64.p0i8")]
@@ -8248,7 +8248,7 @@ pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vst4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -8261,7 +8261,7 @@ pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vst4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8274,7 +8274,7 @@ pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8287,7 +8287,7 @@ pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
transmute(vst4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -8300,7 +8300,7 @@ pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
transmute(vst4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8313,7 +8313,7 @@ pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
@@ -8326,7 +8326,7 @@ pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v1f64.p0i8")]
@@ -8344,7 +8344,7 @@ pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2f64.p0i8")]
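
The store-lane hunks mirror the loads. A hypothetical usage sketch for the simplest of them, vst1q_lane_f64, which writes one chosen lane to memory (the setup values here are illustrative):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn store_lane1(out: *mut f64) {
    use core::arch::aarch64::{vdupq_n_f64, vst1q_lane_f64};
    let v = vdupq_n_f64(2.5); // both lanes hold 2.5
    // LANE = 1 passes static_assert_uimm_bits!(LANE, 1); LANE = 2 would not compile.
    vst1q_lane_f64::<1>(out, v); // *out = 2.5
}
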
@@ -8406,7 +8406,7 @@ pub unsafe fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_mul(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
}
@@ -8419,7 +8419,7 @@ pub unsafe fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_mul(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
}
@@ -8432,8 +8432,8 @@ pub unsafe fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
- static_assert!(LANE : i32 where LANE == 0);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert!(LANE == 0);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply
@@ -8445,8 +8445,8 @@ pub unsafe fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply
@@ -8458,7 +8458,7 @@ pub unsafe fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
let b: f32 = simd_extract(b, LANE as u32);
a * b
}
@@ -8472,7 +8472,7 @@ pub unsafe fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
let b: f32 = simd_extract(b, LANE as u32);
a * b
}
@@ -8486,7 +8486,7 @@ pub unsafe fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
let b: f64 = simd_extract(b, LANE as u32);
a * b
}
@@ -8500,7 +8500,7 @@ pub unsafe fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
let b: f64 = simd_extract(b, LANE as u32);
a * b
}
@@ -8513,8 +8513,8 @@ pub unsafe fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
- let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
vmull_s8(a, b)
}
@@ -8526,8 +8526,8 @@ pub unsafe fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
vmull_s16(a, b)
}
@@ -8539,8 +8539,8 @@ pub unsafe fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
vmull_s32(a, b)
}
@@ -8552,8 +8552,8 @@ pub unsafe fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
- let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
vmull_u8(a, b)
}
@@ -8565,8 +8565,8 @@ pub unsafe fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
- let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
vmull_u16(a, b)
}
@@ -8578,8 +8578,8 @@ pub unsafe fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
- let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
vmull_u32(a, b)
}
@@ -8607,8 +8607,8 @@ pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 {
#[cfg_attr(test, assert_instr(pmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
- let a: poly8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let b: poly8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
vmull_p8(a, b)
}
@@ -8676,8 +8676,8 @@ pub unsafe fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmull_high_s16(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8689,8 +8689,8 @@ pub unsafe fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- static_assert_imm3!(LANE);
- vmull_high_s16(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmull_high_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8702,8 +8702,8 @@ pub unsafe fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
- vmull_high_s32(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8715,8 +8715,8 @@ pub unsafe fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- static_assert_imm2!(LANE);
- vmull_high_s32(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_high_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8728,8 +8728,8 @@ pub unsafe fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmull_high_u16(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8741,8 +8741,8 @@ pub unsafe fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
- static_assert_imm3!(LANE);
- vmull_high_u16(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmull_high_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8754,8 +8754,8 @@ pub unsafe fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
- vmull_high_u32(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply long
@@ -8767,8 +8767,8 @@ pub unsafe fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
- static_assert_imm2!(LANE);
- vmull_high_u32(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_high_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
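
The widening multiplies change assertion and shuffle together: the duplicated-lane index array now carries the width that simd_shuffle4!/simd_shuffle8! used to encode in their names. A hypothetical caller exercising the 2-bit lane bound on vmull_high_lane_u16:

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
unsafe fn widen_by_lane(
    a: core::arch::aarch64::uint16x8_t,
    b: core::arch::aarch64::uint16x4_t,
) -> core::arch::aarch64::uint32x4_t {
    use core::arch::aarch64::vmull_high_lane_u16;
    // LANE = 3 is the last valid index for a 4-lane vector:
    // static_assert_uimm_bits!(LANE, 2) requires LANE < 2^2.
    vmull_high_lane_u16::<3>(a, b)
}
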
@@ -8844,7 +8844,7 @@ pub unsafe fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
vmulx_f64(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
}
@@ -8857,7 +8857,7 @@ pub unsafe fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vmulx_f64(a, transmute::<f64, _>(simd_extract(b, LANE as u32)))
}
@@ -8870,8 +8870,8 @@ pub unsafe fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- vmulx_f32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
@@ -8883,8 +8883,8 @@ pub unsafe fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
- static_assert_imm2!(LANE);
- vmulx_f32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
@@ -8896,8 +8896,8 @@ pub unsafe fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- vmulxq_f32(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
@@ -8909,8 +8909,8 @@ pub unsafe fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
- vmulxq_f32(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmulxq_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
@@ -8922,8 +8922,8 @@ pub unsafe fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
- static_assert!(LANE : i32 where LANE == 0);
- vmulxq_f64(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert!(LANE == 0);
+ vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
@@ -8935,8 +8935,8 @@ pub unsafe fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE);
- vmulxq_f64(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply extended
@@ -8980,7 +8980,7 @@ pub unsafe fn vmulxd_f64(a: f64, b: f64) -> f64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vmulxs_f32(a, simd_extract(b, LANE as u32))
}
@@ -8993,7 +8993,7 @@ pub unsafe fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vmulxs_f32(a, simd_extract(b, LANE as u32))
}
@@ -9006,7 +9006,7 @@ pub unsafe fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
vmulxd_f64(a, simd_extract(b, LANE as u32))
}
@@ -9019,7 +9019,7 @@ pub unsafe fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vmulxd_f64(a, simd_extract(b, LANE as u32))
}
@@ -9086,7 +9086,7 @@ pub unsafe fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9099,7 +9099,7 @@ pub unsafe fn vfma_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vfma_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9112,7 +9112,7 @@ pub unsafe fn vfma_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9125,7 +9125,7 @@ pub unsafe fn vfmaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vfmaq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9138,7 +9138,7 @@ pub unsafe fn vfmaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9151,7 +9151,7 @@ pub unsafe fn vfma_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfma_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfma_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9164,7 +9164,7 @@ pub unsafe fn vfma_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9177,7 +9177,7 @@ pub unsafe fn vfmaq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmaq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfmaq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9195,7 +9195,7 @@ pub unsafe fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) ->
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")]
fn vfmas_lane_f32_(a: f32, b: f32, c: f32) -> f32;
}
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
let c: f32 = simd_extract(c, LANE as u32);
vfmas_lane_f32_(b, c, a)
}
@@ -9214,7 +9214,7 @@ pub unsafe fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f32")]
fn vfmas_laneq_f32_(a: f32, b: f32, c: f32) -> f32;
}
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
let c: f32 = simd_extract(c, LANE as u32);
vfmas_laneq_f32_(b, c, a)
}
@@ -9233,7 +9233,7 @@ pub unsafe fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) ->
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")]
fn vfmad_lane_f64_(a: f64, b: f64, c: f64) -> f64;
}
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
let c: f64 = simd_extract(c, LANE as u32);
vfmad_lane_f64_(b, c, a)
}
@@ -9252,7 +9252,7 @@ pub unsafe fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.fma.f64")]
fn vfmad_laneq_f64_(a: f64, b: f64, c: f64) -> f64;
}
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
let c: f64 = simd_extract(c, LANE as u32);
vfmad_laneq_f64_(b, c, a)
}
@@ -9312,7 +9312,7 @@ pub unsafe fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9325,7 +9325,7 @@ pub unsafe fn vfms_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vfms_f32(a, b, vdup_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9338,7 +9338,7 @@ pub unsafe fn vfms_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9351,7 +9351,7 @@ pub unsafe fn vfmsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vfmsq_f32(a, b, vdupq_n_f32(simd_extract(c, LANE as u32)))
}
@@ -9364,7 +9364,7 @@ pub unsafe fn vfmsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9377,7 +9377,7 @@ pub unsafe fn vfms_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c: float64x2_t) -> float64x1_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfms_f64(a, b, vdup_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9390,7 +9390,7 @@ pub unsafe fn vfms_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x1_t) -> float64x2_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
}
@@ -9403,7 +9403,7 @@ pub unsafe fn vfmsq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vfmsq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vfmsq_f64(a, b, vdupq_n_f64(simd_extract(c, LANE as u32)))
}
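
Editor's note: for reference while reading these hunks, vfms_lane and friends compute a fused multiply-subtract against one broadcast lane of c. A scalar sketch of the per-element arithmetic (the intrinsic fuses multiply and subtract into a single rounding; this model uses two separate ops):

// Scalar model of vfms_lane_f32: out[i] = a[i] - b[i] * c[LANE].
// In stdarch, static_assert_uimm_bits!(LANE, 1) guards LANE at compile time.
fn vfms_lane_model<const LANE: usize>(a: [f32; 2], b: [f32; 2], c: [f32; 2]) -> [f32; 2] {
    let l = c[LANE];
    [a[0] - b[0] * l, a[1] - b[1] * l]
}

fn main() {
    assert_eq!(vfms_lane_model::<1>([10.0, 20.0], [2.0, 3.0], [0.0, 4.0]), [2.0, 8.0]);
}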
@@ -9749,7 +9749,7 @@ pub unsafe fn vaddlvq_u32(a: uint32x4_t) -> u64 {
#[cfg_attr(test, assert_instr(ssubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
- let c: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
simd_sub(a, simd_cast(c))
}
@@ -9761,7 +9761,7 @@ pub unsafe fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(ssubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
- let c: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
simd_sub(a, simd_cast(c))
}
@@ -9773,7 +9773,7 @@ pub unsafe fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(ssubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
- let c: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
simd_sub(a, simd_cast(c))
}
@@ -9785,7 +9785,7 @@ pub unsafe fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(usubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
- let c: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
simd_sub(a, simd_cast(c))
}
@@ -9797,7 +9797,7 @@ pub unsafe fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(usubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
- let c: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
simd_sub(a, simd_cast(c))
}
@@ -9809,7 +9809,7 @@ pub unsafe fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(usubw))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
- let c: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
simd_sub(a, simd_cast(c))
}
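
Editor's note: the hunks above also replace the width-suffixed shuffle macros (simd_shuffle2!/4!/8!/16!) with a single simd_shuffle!, which can take the output lane count from the length of the index array itself. A plain-Rust sketch of that inference (the real macro operates on SIMD vector types inside core):

// The output width N is inferred from the index array, so one generic
// shuffle replaces the old per-width macros.
fn shuffle_model<const M: usize, const N: usize>(v: [i32; M], idx: [usize; N]) -> [i32; N] {
    let mut out = [0i32; N];
    for i in 0..N {
        out[i] = v[idx[i]];
    }
    out
}

fn main() {
    // The "take the high half" shuffles above, in miniature:
    assert_eq!(shuffle_model([10, 11, 12, 13], [2, 3]), [12, 13]);
}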
@@ -9821,9 +9821,9 @@ pub unsafe fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(ssubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
- let c: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
let d: int16x8_t = simd_cast(c);
- let e: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let f: int16x8_t = simd_cast(e);
simd_sub(d, f)
}
@@ -9836,9 +9836,9 @@ pub unsafe fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(ssubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- let c: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
let d: int32x4_t = simd_cast(c);
- let e: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let f: int32x4_t = simd_cast(e);
simd_sub(d, f)
}
@@ -9851,9 +9851,9 @@ pub unsafe fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(ssubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- let c: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
let d: int64x2_t = simd_cast(c);
- let e: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
let f: int64x2_t = simd_cast(e);
simd_sub(d, f)
}
@@ -9866,9 +9866,9 @@ pub unsafe fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(usubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
- let c: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
let d: uint16x8_t = simd_cast(c);
- let e: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let f: uint16x8_t = simd_cast(e);
simd_sub(d, f)
}
@@ -9881,9 +9881,9 @@ pub unsafe fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(usubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
- let c: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
let d: uint32x4_t = simd_cast(c);
- let e: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let f: uint32x4_t = simd_cast(e);
simd_sub(d, f)
}
@@ -9896,9 +9896,9 @@ pub unsafe fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(usubl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
- let c: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
let d: uint64x2_t = simd_cast(c);
- let e: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
let f: uint64x2_t = simd_cast(e);
simd_sub(d, f)
}
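
Editor's note: vsubl_high differs from vsubw_high in that both operands are narrow — the upper halves of both inputs are widened before the subtraction, so the result cannot wrap. A scalar sketch under that reading of the bodies above:

// Scalar model of vsubl_high_s8: widen the high halves of both inputs
// to i16, then subtract lane-wise.
fn vsubl_high_s8_model(a: [i8; 16], b: [i8; 16]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for i in 0..8 {
        out[i] = a[8 + i] as i16 - b[8 + i] as i16;
    }
    out
}

fn main() {
    let (mut a, mut b) = ([0i8; 16], [0i8; 16]);
    a[8] = -128;
    b[8] = 127;
    assert_eq!(vsubl_high_s8_model(a, b)[0], -255); // widened first, so no wrap
}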
@@ -10301,8 +10301,8 @@ pub unsafe fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t)
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_f32(a, b, c)
}
@@ -10314,8 +10314,8 @@ pub unsafe fn vcmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_f32(a, b, c)
}
@@ -10327,8 +10327,8 @@ pub unsafe fn vcmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_f32(a, b, c)
}
@@ -10340,8 +10340,8 @@ pub unsafe fn vcmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_f32(a, b, c)
}
@@ -10353,8 +10353,8 @@ pub unsafe fn vcmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t,
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_rot90_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_rot90_f32(a, b, c)
}
@@ -10366,8 +10366,8 @@ pub unsafe fn vcmla_rot90_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_rot90_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_rot90_f32(a, b, c)
}
@@ -10379,8 +10379,8 @@ pub unsafe fn vcmla_rot90_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_rot90_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_rot90_f32(a, b, c)
}
@@ -10392,8 +10392,8 @@ pub unsafe fn vcmlaq_rot90_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_rot90_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_rot90_f32(a, b, c)
}
@@ -10405,8 +10405,8 @@ pub unsafe fn vcmlaq_rot90_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_rot180_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_rot180_f32(a, b, c)
}
@@ -10418,8 +10418,8 @@ pub unsafe fn vcmla_rot180_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_rot180_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_rot180_f32(a, b, c)
}
@@ -10431,8 +10431,8 @@ pub unsafe fn vcmla_rot180_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_rot180_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_rot180_f32(a, b, c)
}
@@ -10444,8 +10444,8 @@ pub unsafe fn vcmlaq_rot180_lane_f32<const LANE: i32>(a: float32x4_t, b: float32
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_rot180_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_rot180_f32(a, b, c)
}
@@ -10457,8 +10457,8 @@ pub unsafe fn vcmlaq_rot180_laneq_f32<const LANE: i32>(a: float32x4_t, b: float3
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_rot270_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_rot270_f32(a, b, c)
}
@@ -10470,8 +10470,8 @@ pub unsafe fn vcmla_rot270_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmla_rot270_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- let c: float32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmla_rot270_f32(a, b, c)
}
@@ -10483,8 +10483,8 @@ pub unsafe fn vcmla_rot270_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_rot270_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert!(LANE : i32 where LANE == 0);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert!(LANE == 0);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_rot270_f32(a, b, c)
}
@@ -10496,8 +10496,8 @@ pub unsafe fn vcmlaq_rot270_lane_f32<const LANE: i32>(a: float32x4_t, b: float32
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vcmlaq_rot270_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- let c: float32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: float32x4_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1, 2 * LANE as u32, 2 * LANE as u32 + 1]);
vcmlaq_rot270_f32(a, b, c)
}
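
Editor's note: in all of the vcmla lane hunks, the index pattern [2 * LANE, 2 * LANE + 1] reflects that each complex number spans two f32 lanes; the diff also drops the old <const LANE: i32> prefix, since the new simd_shuffle! accepts const expressions in the indices directly. A sketch of the lane selection:

// Model of the vcmla lane shuffle: complex lane LANE occupies scalar
// lanes 2*LANE and 2*LANE+1, and the selected (re, im) pair is repeated.
fn dup_complex_lane<const LANE: usize>(c: [f32; 4]) -> [f32; 4] {
    let (re, im) = (c[2 * LANE], c[2 * LANE + 1]);
    [re, im, re, im]
}

fn main() {
    // Complex lane 1 of [(1,2), (3,4)] is (3,4), broadcast across the vector.
    assert_eq!(dup_complex_lane::<1>([1.0, 2.0, 3.0, 4.0]), [3.0, 4.0, 3.0, 4.0]);
}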
@@ -10569,8 +10569,8 @@ pub unsafe fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- let c: int8x8_t = simd_shuffle8!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdot_s32(a, b, c)
}
@@ -10582,8 +10582,8 @@ pub unsafe fn vdot_lane_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- let c: int8x8_t = simd_shuffle8!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdot_s32(a, b, c)
}
@@ -10595,8 +10595,8 @@ pub unsafe fn vdot_laneq_s32<const LANE: i32>(a: int32x2_t, b: int8x8_t, c: int8
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- let c: int8x16_t = simd_shuffle16!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdotq_s32(a, b, c)
}
@@ -10608,8 +10608,8 @@ pub unsafe fn vdotq_lane_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int
#[cfg_attr(test, assert_instr(sdot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- let c: int8x16_t = simd_shuffle16!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdotq_s32(a, b, c)
}
@@ -10621,8 +10621,8 @@ pub unsafe fn vdotq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int8x16_t, c: in
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t {
- static_assert_imm1!(LANE);
- let c: uint8x8_t = simd_shuffle8!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdot_u32(a, b, c)
}
@@ -10634,8 +10634,8 @@ pub unsafe fn vdot_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uin
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t {
- static_assert_imm2!(LANE);
- let c: uint8x8_t = simd_shuffle8!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: uint8x8_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdot_u32(a, b, c)
}
@@ -10647,8 +10647,8 @@ pub unsafe fn vdot_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint8x8_t, c: ui
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t {
- static_assert_imm1!(LANE);
- let c: uint8x16_t = simd_shuffle16!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdotq_u32(a, b, c)
}
@@ -10660,8 +10660,8 @@ pub unsafe fn vdotq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: u
#[cfg_attr(test, assert_instr(udot, LANE = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn vdotq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- let c: uint8x16_t = simd_shuffle16!(c, c, <const LANE: i32> [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: uint8x16_t = simd_shuffle!(c, c, [4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3, 4 * LANE as u32, 4 * LANE as u32 + 1, 4 * LANE as u32 + 2, 4 * LANE as u32 + 3]);
vdotq_u32(a, b, c)
}
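
Editor's note: the vdot lane shuffles follow the same idea with groups of four — each i32 accumulator consumes four i8 elements, so selecting lane LANE copies the 4-byte group starting at 4 * LANE into every group position. A sketch:

// Model of the shuffle feeding vdot_lane_s32: replicate the selected
// 4-element group across the whole vector.
fn dup_dot_group<const LANE: usize>(c: [i8; 8]) -> [i8; 8] {
    let mut out = [0i8; 8];
    for g in 0..2 {
        for j in 0..4 {
            out[4 * g + j] = c[4 * LANE + j];
        }
    }
    out
}

fn main() {
    assert_eq!(dup_dot_group::<1>([1, 2, 3, 4, 5, 6, 7, 8]), [5, 6, 7, 8, 5, 6, 7, 8]);
}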
@@ -11009,7 +11009,7 @@ pub unsafe fn vminnmvq_f32(a: float32x4_t) -> f32 {
#[cfg_attr(test, assert_instr(sxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
- let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
vmovl_s8(a)
}
@@ -11021,7 +11021,7 @@ pub unsafe fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(sxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
vmovl_s16(a)
}
@@ -11033,7 +11033,7 @@ pub unsafe fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(sxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
vmovl_s32(a)
}
@@ -11045,7 +11045,7 @@ pub unsafe fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(uxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
- let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
vmovl_u8(a)
}
@@ -11057,7 +11057,7 @@ pub unsafe fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(uxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
- let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
vmovl_u16(a)
}
@@ -11069,7 +11069,7 @@ pub unsafe fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(uxtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
- let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
vmovl_u32(a)
}
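
Editor's note: vmovl_high is the widening move counterpart of the patterns above — take the high half, then sign- or zero-extend. A scalar sketch of the signed case:

// Scalar model of vmovl_high_s8: sign-extend the upper 8 lanes to i16
// (`as` sign-extends i8 -> i16, matching what sxtl2 does).
fn vmovl_high_s8_model(a: [i8; 16]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for i in 0..8 {
        out[i] = a[8 + i] as i16;
    }
    out
}

fn main() {
    let mut a = [0i8; 16];
    a[15] = -1;
    assert_eq!(vmovl_high_s8_model(a)[7], -1);
}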
@@ -11280,8 +11280,8 @@ pub unsafe fn vqdmulls_s32(a: i32, b: i32) -> i64 {
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
vqdmull_s16(a, b)
}
@@ -11293,8 +11293,8 @@ pub unsafe fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
vqdmull_s32(a, b)
}
@@ -11306,7 +11306,7 @@ pub unsafe fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
let b: int16x4_t = vdup_n_s16(b);
vqdmull_s16(a, b)
}
@@ -11319,7 +11319,7 @@ pub unsafe fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
let b: int32x2_t = vdup_n_s32(b);
vqdmull_s32(a, b)
}
@@ -11333,8 +11333,8 @@ pub unsafe fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
- static_assert_imm3!(N);
- let b: int16x4_t = simd_shuffle4!(b, b, <const N: i32> [N as u32, N as u32, N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 3);
+ let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
vqdmull_s16(a, b)
}
@@ -11347,8 +11347,8 @@ pub unsafe fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
- static_assert_imm2!(N);
- let b: int32x2_t = simd_shuffle2!(b, b, <const N: i32> [N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 2);
+ let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
vqdmull_s32(a, b)
}
@@ -11361,7 +11361,7 @@ pub unsafe fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
let b: i16 = simd_extract(b, N as u32);
vqdmullh_s16(a, b)
}
@@ -11375,7 +11375,7 @@ pub unsafe fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let b: i16 = simd_extract(b, N as u32);
vqdmullh_s16(a, b)
}
@@ -11389,7 +11389,7 @@ pub unsafe fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
let b: i32 = simd_extract(b, N as u32);
vqdmulls_s32(a, b)
}
@@ -11403,7 +11403,7 @@ pub unsafe fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
let b: i32 = simd_extract(b, N as u32);
vqdmulls_s32(a, b)
}
@@ -11417,9 +11417,9 @@ pub unsafe fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
- static_assert_imm2!(N);
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: int16x4_t = simd_shuffle4!(b, b, <const N: i32> [N as u32, N as u32, N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 2);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
vqdmull_s16(a, b)
}
@@ -11432,9 +11432,9 @@ pub unsafe fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
- static_assert_imm1!(N);
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: int32x2_t = simd_shuffle2!(b, b, <const N: i32> [N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 1);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
vqdmull_s32(a, b)
}
@@ -11447,9 +11447,9 @@ pub unsafe fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- static_assert_imm3!(N);
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: int16x4_t = simd_shuffle4!(b, b, <const N: i32> [N as u32, N as u32, N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 3);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
vqdmull_s16(a, b)
}
@@ -11462,9 +11462,9 @@ pub unsafe fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- static_assert_imm2!(N);
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: int32x2_t = simd_shuffle2!(b, b, <const N: i32> [N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 2);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
vqdmull_s32(a, b)
}
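
Editor's note: all of the vqdmull variants reduce to the same per-lane arithmetic — widen, multiply, double, saturate. Only i16::MIN * i16::MIN yields a doubled product that overflows i32, so that is the single case the saturation clamps. A scalar sketch:

// Scalar model of one vqdmull_s16 lane. |a*b| <= 2^30, so only the
// doubling step can overflow, and saturating_mul handles it.
fn vqdmull_lane_model(a: i16, b: i16) -> i32 {
    ((a as i32) * (b as i32)).saturating_mul(2)
}

fn main() {
    assert_eq!(vqdmull_lane_model(1000, 2000), 4_000_000);
    assert_eq!(vqdmull_lane_model(i16::MIN, i16::MIN), i32::MAX); // saturated
}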
@@ -11521,7 +11521,7 @@ pub unsafe fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
@@ -11534,7 +11534,7 @@ pub unsafe fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
@@ -11547,7 +11547,7 @@ pub unsafe fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
@@ -11560,7 +11560,7 @@ pub unsafe fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
@@ -11573,7 +11573,7 @@ pub unsafe fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
@@ -11586,7 +11586,7 @@ pub unsafe fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
@@ -11623,7 +11623,7 @@ pub unsafe fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmlalh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -11636,7 +11636,7 @@ pub unsafe fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqdmlalh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -11649,7 +11649,7 @@ pub unsafe fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqdmlals_s32(a, b, simd_extract(c, LANE as u32))
}
@@ -11662,7 +11662,7 @@ pub unsafe fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmlals_s32(a, b, simd_extract(c, LANE as u32))
}
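
Editor's note: as the bodies above show, vqdmlal is vqaddq over a vqdmull product; per lane that is a saturating accumulate of the doubled multiply. A scalar sketch:

// Scalar model of one vqdmlal_s16 lane: doubling multiply as in vqdmull,
// then a saturating add into the i32 accumulator.
fn vqdmlal_lane_model(a: i32, b: i16, c: i16) -> i32 {
    let prod = ((b as i32) * (c as i32)).saturating_mul(2);
    a.saturating_add(prod)
}

fn main() {
    assert_eq!(vqdmlal_lane_model(i32::MAX - 1, 1, 1), i32::MAX); // clamps on accumulate
}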
@@ -11719,7 +11719,7 @@ pub unsafe fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
@@ -11732,7 +11732,7 @@ pub unsafe fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
@@ -11745,7 +11745,7 @@ pub unsafe fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
@@ -11758,7 +11758,7 @@ pub unsafe fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
@@ -11771,7 +11771,7 @@ pub unsafe fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
@@ -11784,7 +11784,7 @@ pub unsafe fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
@@ -11821,7 +11821,7 @@ pub unsafe fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmlslh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -11834,7 +11834,7 @@ pub unsafe fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqdmlslh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -11847,7 +11847,7 @@ pub unsafe fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqdmlsls_s32(a, b, simd_extract(c, LANE as u32))
}
@@ -11860,7 +11860,7 @@ pub unsafe fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmlsls_s32(a, b, simd_extract(c, LANE as u32))
}
@@ -11899,7 +11899,7 @@ pub unsafe fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
let b: i16 = simd_extract(b, N as u32);
vqdmulhh_s16(a, b)
}
@@ -11913,7 +11913,7 @@ pub unsafe fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let b: i16 = simd_extract(b, N as u32);
vqdmulhh_s16(a, b)
}
@@ -11927,7 +11927,7 @@ pub unsafe fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
let b: i32 = simd_extract(b, N as u32);
vqdmulhs_s32(a, b)
}
@@ -11941,7 +11941,7 @@ pub unsafe fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
let b: i32 = simd_extract(b, N as u32);
vqdmulhs_s32(a, b)
}
@@ -11955,7 +11955,7 @@ pub unsafe fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32)))
}
@@ -11968,7 +11968,7 @@ pub unsafe fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> i
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32)))
}
@@ -11981,7 +11981,7 @@ pub unsafe fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32)))
}
@@ -11994,7 +11994,7 @@ pub unsafe fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> i
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32)))
}
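
Editor's note: vqdmulh keeps only the high half of the doubled product — the Q15 fixed-point multiply. A scalar sketch of one lane:

// Scalar model of one vqdmulh_s16 lane: double the widened product,
// saturate, and take the top 16 bits.
fn vqdmulh_lane_model(a: i16, b: i16) -> i16 {
    let doubled = ((a as i32) * (b as i32)).saturating_mul(2);
    (doubled >> 16) as i16
}

fn main() {
    // 0.5 * 0.5 in Q15 (16384 * 16384) is 0.25, i.e. 8192.
    assert_eq!(vqdmulh_lane_model(16384, 16384), 8192);
}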
@@ -12082,7 +12082,7 @@ pub unsafe fn vqmovnd_u64(a: u64) -> u32 {
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
- simd_shuffle16!(a, vqmovn_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, vqmovn_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Signed saturating extract narrow
@@ -12093,7 +12093,7 @@ pub unsafe fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
- simd_shuffle8!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Signed saturating extract narrow
@@ -12104,7 +12104,7 @@ pub unsafe fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
- simd_shuffle4!(a, vqmovn_s64(b), [0, 1, 2, 3])
+ simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3])
}
/// Unsigned saturating extract narrow
@@ -12115,7 +12115,7 @@ pub unsafe fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
- simd_shuffle16!(a, vqmovn_u16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, vqmovn_u16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Unsigned saturating extract narrow
@@ -12126,7 +12126,7 @@ pub unsafe fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
- simd_shuffle8!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Unsigned saturating extract narrow
@@ -12137,7 +12137,7 @@ pub unsafe fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
- simd_shuffle4!(a, vqmovn_u64(b), [0, 1, 2, 3])
+ simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3])
}
/// Signed saturating extract unsigned narrow
@@ -12181,7 +12181,7 @@ pub unsafe fn vqmovund_s64(a: i64) -> u32 {
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
- simd_shuffle16!(a, vqmovun_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, vqmovun_s16(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Signed saturating extract unsigned narrow
@@ -12192,7 +12192,7 @@ pub unsafe fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
- simd_shuffle8!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Signed saturating extract unsigned narrow
@@ -12203,7 +12203,7 @@ pub unsafe fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
- simd_shuffle4!(a, vqmovun_s64(b), [0, 1, 2, 3])
+ simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3])
}
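
Editor's note: every vqmovn_high/vqmovun_high body above is the same two-step pattern — saturate-narrow b, then concatenate a with the result, which is exactly what the identity-index simd_shuffle! expresses. A scalar sketch (the saturating narrow is modeled with a clamp):

// Model of vqmovn_high_s16: low half is `a` unchanged, high half is
// `b` narrowed with saturation to i8.
fn vqmovn_high_s16_model(a: [i8; 8], b: [i16; 8]) -> [i8; 16] {
    let mut out = [0i8; 16];
    out[..8].copy_from_slice(&a);
    for i in 0..8 {
        out[8 + i] = b[i].clamp(-128, 127) as i8;
    }
    out
}

fn main() {
    let out = vqmovn_high_s16_model([0; 8], [300, -300, 5, 0, 0, 0, 0, 0]);
    assert_eq!((out[8], out[9], out[10]), (127, -128, 5));
}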
/// Signed saturating rounding doubling multiply returning high half
@@ -12237,7 +12237,7 @@ pub unsafe fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqrdmulhh_s16(a, simd_extract(b, LANE as u32))
}
@@ -12250,7 +12250,7 @@ pub unsafe fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqrdmulhh_s16(a, simd_extract(b, LANE as u32))
}
@@ -12263,7 +12263,7 @@ pub unsafe fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqrdmulhs_s32(a, simd_extract(b, LANE as u32))
}
@@ -12276,7 +12276,7 @@ pub unsafe fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqrdmulhs_s32(a, simd_extract(b, LANE as u32))
}
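
Editor's note: vqrdmulh is vqdmulh with round-to-nearest — a rounding constant of 2^15 is added before the high half is taken. A scalar sketch:

// Scalar model of one vqrdmulh_s16 lane: doubled product, rounding bias,
// then the top 16 bits, all with saturation.
fn vqrdmulh_lane_model(a: i16, b: i16) -> i16 {
    let doubled = ((a as i32) * (b as i32)).saturating_mul(2);
    (doubled.saturating_add(1 << 15) >> 16) as i16
}

fn main() {
    // 3 * 16384 in Q15 rounds up to 2 where vqdmulh would truncate to 1.
    assert_eq!(vqrdmulh_lane_model(3, 16384), 2);
}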
@@ -12381,8 +12381,8 @@ pub unsafe fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
- let c: int16x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlah_s16(a, b, c)
}
@@ -12395,8 +12395,8 @@ pub unsafe fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
- let c: int16x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 3);
+ let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlah_s16(a, b, c)
}
@@ -12409,8 +12409,8 @@ pub unsafe fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
- let c: int16x8_t = simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlahq_s16(a, b, c)
}
@@ -12423,8 +12423,8 @@ pub unsafe fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
- let c: int16x8_t = simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 3);
+ let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlahq_s16(a, b, c)
}
@@ -12437,8 +12437,8 @@ pub unsafe fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- let c: int32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
vqrdmlah_s32(a, b, c)
}
@@ -12451,8 +12451,8 @@ pub unsafe fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- let c: int32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
vqrdmlah_s32(a, b, c)
}
@@ -12465,8 +12465,8 @@ pub unsafe fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- let c: int32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlahq_s32(a, b, c)
}
@@ -12479,8 +12479,8 @@ pub unsafe fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- let c: int32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlahq_s32(a, b, c)
}
@@ -12493,7 +12493,7 @@ pub unsafe fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqrdmlahh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -12506,7 +12506,7 @@ pub unsafe fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqrdmlahh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -12519,7 +12519,7 @@ pub unsafe fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqrdmlahs_s32(a, b, simd_extract(c, LANE as u32))
}
@@ -12532,7 +12532,7 @@ pub unsafe fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqrdmlahs_s32(a, b, simd_extract(c, LANE as u32))
}
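
Note: the assertion migration in these hunks replaces the width-in-the-name macros (static_assert_imm1!/imm2!/imm3!) with static_assert_uimm_bits!(IMM, BITS), which takes the width as an argument. A hedged, stand-alone sketch of what such a check can look like — not stdarch's actual implementation, which differs — using inline const blocks (Rust 1.79+):

    // Hypothetical stand-in for static_assert_uimm_bits!: fail compilation
    // when a const generic does not fit in `bits` unsigned bits.
    macro_rules! static_assert_uimm_bits {
        ($imm:ident, $bits:expr) => {
            const { assert!($imm >= 0 && ($imm as u64) < (1u64 << $bits)) }
        };
    }

    fn lane_in_2_bits<const LANE: i32>() {
        static_assert_uimm_bits!(LANE, 2); // accepts LANE in 0..=3
    }
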
@@ -12637,8 +12637,8 @@ pub unsafe fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
- let c: int16x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlsh_s16(a, b, c)
}
@@ -12651,8 +12651,8 @@ pub unsafe fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
- let c: int16x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 3);
+ let c: int16x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlsh_s16(a, b, c)
}
@@ -12665,8 +12665,8 @@ pub unsafe fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
- let c: int16x8_t = simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlshq_s16(a, b, c)
}
@@ -12679,8 +12679,8 @@ pub unsafe fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
- let c: int16x8_t = simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 3);
+ let c: int16x8_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlshq_s16(a, b, c)
}
@@ -12693,8 +12693,8 @@ pub unsafe fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- let c: int32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
vqrdmlsh_s32(a, b, c)
}
@@ -12707,8 +12707,8 @@ pub unsafe fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- let c: int32x2_t = simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
vqrdmlsh_s32(a, b, c)
}
@@ -12721,8 +12721,8 @@ pub unsafe fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- let c: int32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 1);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlshq_s32(a, b, c)
}
@@ -12735,8 +12735,8 @@ pub unsafe fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- let c: int32x4_t = simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmlshq_s32(a, b, c)
}
@@ -12749,7 +12749,7 @@ pub unsafe fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqrdmlshh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -12762,7 +12762,7 @@ pub unsafe fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqrdmlshh_s16(a, b, simd_extract(c, LANE as u32))
}
@@ -12775,7 +12775,7 @@ pub unsafe fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
vqrdmlshs_s32(a, b, simd_extract(c, LANE as u32))
}
@@ -12788,7 +12788,7 @@ pub unsafe fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t)
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub unsafe fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqrdmlshs_s32(a, b, simd_extract(c, LANE as u32))
}
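
Note: a hedged usage sketch of the lane variants above (aarch64 with the rdm target feature; demo_rdm and the input values are illustrative only):

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "rdm")]
    unsafe fn demo_rdm() -> i16 {
        use core::arch::aarch64::*;
        let a = vdup_n_s16(100);
        let b = vdup_n_s16(200);
        let c = vdup_n_s16(300);
        // Lane 1 of `c` is broadcast, then the saturating rounding doubling
        // multiply-subtract runs per lane.
        let r = vqrdmlsh_lane_s16::<1>(a, b, c);
        vget_lane_s16::<0>(r)
    }
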
@@ -12917,7 +12917,7 @@ pub unsafe fn vqrshlh_u16(a: u16, b: i16) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let a: int16x8_t = vdupq_n_s16(a);
simd_extract(vqrshrn_n_s16::<N>(a), 0)
}
@@ -12931,7 +12931,7 @@ pub unsafe fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let a: int32x4_t = vdupq_n_s32(a);
simd_extract(vqrshrn_n_s32::<N>(a), 0)
}
@@ -12945,7 +12945,7 @@ pub unsafe fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let a: int64x2_t = vdupq_n_s64(a);
simd_extract(vqrshrn_n_s64::<N>(a), 0)
}
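
Note: the N bounds asserted above match the width being shifted away (i16 -> i8 admits N in 1..=8, and so on). A rough scalar model of one lane, with the hypothetical helper rshrn_lane — the vqrshrn* intrinsics saturate rather than truncate:

    // Rounding shift right narrow, i16 -> i8, on plain integers.
    fn rshrn_lane(x: i16, n: u32) -> i8 {
        debug_assert!((1..=8).contains(&n));
        (((x as i32) + (1 << (n - 1))) >> n) as i8
    }
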
@@ -12959,8 +12959,8 @@ pub unsafe fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vqrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vqrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Signed saturating rounded shift right narrow
@@ -12972,8 +12972,8 @@ pub unsafe fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Signed saturating rounded shift right narrow
@@ -12985,8 +12985,8 @@ pub unsafe fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> in
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3])
}
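
Note: the index lists fed to simd_shuffle! in these *_high intrinsics simply concatenate the two operands — indices below the first vector's lane count select from it, the rest from the second. Spelled out with an illustrative helper:

    // [0, 1, ..., 15] over two 8-lane inputs: all of `a`, then all of the
    // freshly narrowed half.
    fn concat8(a: [i8; 8], b: [i8; 8]) -> [i8; 16] {
        let mut out = [0i8; 16];
        out[..8].copy_from_slice(&a);
        out[8..].copy_from_slice(&b);
        out
    }
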
/// Unsigned saturating rounded shift right narrow
@@ -12998,7 +12998,7 @@ pub unsafe fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> in
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let a: uint16x8_t = vdupq_n_u16(a);
simd_extract(vqrshrn_n_u16::<N>(a), 0)
}
@@ -13012,7 +13012,7 @@ pub unsafe fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let a: uint32x4_t = vdupq_n_u32(a);
simd_extract(vqrshrn_n_u32::<N>(a), 0)
}
@@ -13026,7 +13026,7 @@ pub unsafe fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let a: uint64x2_t = vdupq_n_u64(a);
simd_extract(vqrshrn_n_u64::<N>(a), 0)
}
@@ -13040,8 +13040,8 @@ pub unsafe fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vqrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vqrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Unsigned saturating rounded shift right narrow
@@ -13053,8 +13053,8 @@ pub unsafe fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Unsigned saturating rounded shift right narrow
@@ -13066,8 +13066,8 @@ pub unsafe fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
/// Signed saturating rounded shift right unsigned narrow
@@ -13079,7 +13079,7 @@ pub unsafe fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) ->
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let a: int16x8_t = vdupq_n_s16(a);
simd_extract(vqrshrun_n_s16::<N>(a), 0)
}
@@ -13093,7 +13093,7 @@ pub unsafe fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let a: int32x4_t = vdupq_n_s32(a);
simd_extract(vqrshrun_n_s32::<N>(a), 0)
}
@@ -13107,7 +13107,7 @@ pub unsafe fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let a: int64x2_t = vdupq_n_s64(a);
simd_extract(vqrshrun_n_s64::<N>(a), 0)
}
@@ -13121,8 +13121,8 @@ pub unsafe fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vqrshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vqrshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Signed saturating rounded shift right unsigned narrow
@@ -13134,8 +13134,8 @@ pub unsafe fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Signed saturating rounded shift right unsigned narrow
@@ -13147,8 +13147,8 @@ pub unsafe fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) ->
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3])
}
/// Signed saturating shift left
@@ -13264,7 +13264,7 @@ pub unsafe fn vqshls_u32(a: u32, b: i32) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(vqshl_n_s8::<N>(vdup_n_s8(a)), 0)
}
@@ -13277,7 +13277,7 @@ pub unsafe fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_extract(vqshl_n_s16::<N>(vdup_n_s16(a)), 0)
}
@@ -13290,7 +13290,7 @@ pub unsafe fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_extract(vqshl_n_s32::<N>(vdup_n_s32(a)), 0)
}
@@ -13303,7 +13303,7 @@ pub unsafe fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_extract(vqshl_n_s64::<N>(vdup_n_s64(a)), 0)
}
@@ -13316,7 +13316,7 @@ pub unsafe fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(vqshl_n_u8::<N>(vdup_n_u8(a)), 0)
}
@@ -13329,7 +13329,7 @@ pub unsafe fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_extract(vqshl_n_u16::<N>(vdup_n_u16(a)), 0)
}
@@ -13342,7 +13342,7 @@ pub unsafe fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_extract(vqshl_n_u32::<N>(vdup_n_u32(a)), 0)
}
@@ -13355,7 +13355,7 @@ pub unsafe fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_extract(vqshl_n_u64::<N>(vdup_n_u64(a)), 0)
}
@@ -13368,7 +13368,7 @@ pub unsafe fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_extract(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0)
}
@@ -13381,7 +13381,7 @@ pub unsafe fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_extract(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0)
}
@@ -13394,7 +13394,7 @@ pub unsafe fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_extract(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0)
}
@@ -13407,7 +13407,7 @@ pub unsafe fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_extract(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0)
}
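
Note: the immediate widths above track the element size, since a saturating shift left of a w-bit lane only admits shift amounts 0..w: 3 bits for i8, 4 for i16, 5 for i32, 6 for i64. Equivalently (illustrative helper):

    // Immediate width = log2(element width in bits): 8 -> 3, 16 -> 4,
    // 32 -> 5, 64 -> 6.
    const fn imm_bits(element_bits: u32) -> u32 {
        element_bits.trailing_zeros()
    }
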
@@ -13420,7 +13420,7 @@ pub unsafe fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.i32")]
@@ -13438,7 +13438,7 @@ pub unsafe fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_extract(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0)
}
@@ -13451,7 +13451,7 @@ pub unsafe fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_extract(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0)
}
@@ -13464,8 +13464,8 @@ pub unsafe fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vqshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vqshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Signed saturating shift right narrow
@@ -13477,8 +13477,8 @@ pub unsafe fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Signed saturating shift right narrow
@@ -13490,8 +13490,8 @@ pub unsafe fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3])
}
/// Unsigned saturating shift right narrow
@@ -13503,7 +13503,7 @@ pub unsafe fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.i32")]
@@ -13521,7 +13521,7 @@ pub unsafe fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_extract(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0)
}
@@ -13534,7 +13534,7 @@ pub unsafe fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_extract(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0)
}
@@ -13547,8 +13547,8 @@ pub unsafe fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vqshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vqshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Unsigned saturating shift right narrow
@@ -13560,8 +13560,8 @@ pub unsafe fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> ui
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Unsigned saturating shift right narrow
@@ -13573,8 +13573,8 @@ pub unsafe fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
/// Signed saturating shift right unsigned narrow
@@ -13586,7 +13586,7 @@ pub unsafe fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> u
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_extract(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0)
}
@@ -13599,7 +13599,7 @@ pub unsafe fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_extract(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0)
}
@@ -13612,7 +13612,7 @@ pub unsafe fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_extract(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0)
}
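
Note: a rough scalar model of one "shift right unsigned narrow" lane above (hypothetical shrun_lane): shift, then clamp the signed result into the unsigned narrow range.

    // One lane of a vqshrun-style narrow, i16 -> u8, on plain integers.
    fn shrun_lane(x: i16, n: u32) -> u8 {
        debug_assert!((1..=8).contains(&n));
        ((x as i32) >> n).clamp(0, u8::MAX as i32) as u8
    }
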
@@ -13625,8 +13625,8 @@ pub unsafe fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vqshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vqshrun_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Signed saturating shift right unsigned narrow
@@ -13638,8 +13638,8 @@ pub unsafe fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> ui
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Signed saturating shift right unsigned narrow
@@ -13651,8 +13651,8 @@ pub unsafe fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3])
}
/// Unsigned saturating accumulate of signed value
@@ -14764,7 +14764,7 @@ pub unsafe fn vrshld_u64(a: u64, b: i64) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vrshld_s64(a, -N as i64)
}
@@ -14777,7 +14777,7 @@ pub unsafe fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vrshld_u64(a, -N as i64)
}
@@ -14790,8 +14790,8 @@ pub unsafe fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vrshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Rounding shift right narrow
@@ -14803,8 +14803,8 @@ pub unsafe fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Rounding shift right narrow
@@ -14816,8 +14816,8 @@ pub unsafe fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3])
}
/// Rounding shift right narrow
@@ -14829,8 +14829,8 @@ pub unsafe fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vrshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Rounding shift right narrow
@@ -14842,8 +14842,8 @@ pub unsafe fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> ui
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Rounding shift right narrow
@@ -14855,8 +14855,8 @@ pub unsafe fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
/// Signed rounding shift right and accumulate.
@@ -14868,7 +14868,7 @@ pub unsafe fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let b: i64 = vrshrd_n_s64::<N>(b);
a.wrapping_add(b)
}
@@ -14882,7 +14882,7 @@ pub unsafe fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let b: u64 = vrshrd_n_u64::<N>(b);
a.wrapping_add(b)
}
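
Note: the two-step body of vrsrad_n_s64 above (rounding shift right, then wrapping add) can be modeled on plain integers; widening to i128 keeps the N == 64 case well-defined (hypothetical rsra_s64):

    fn rsra_s64(a: i64, b: i64, n: u32) -> i64 {
        debug_assert!((1..=64).contains(&n));
        // Add half an ULP of the discarded bits, shift, then accumulate.
        let shifted = (((b as i128) + (1i128 << (n - 1))) >> n) as i64;
        a.wrapping_add(shifted)
    }
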
@@ -14896,7 +14896,7 @@ pub unsafe fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
let x: int8x8_t = vrsubhn_s16(b, c);
- simd_shuffle16!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Rounding subtract returning high narrow
@@ -14908,7 +14908,7 @@ pub unsafe fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
let x: int16x4_t = vrsubhn_s32(b, c);
- simd_shuffle8!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Rounding subtract returning high narrow
@@ -14920,7 +14920,7 @@ pub unsafe fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int1
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
let x: int32x2_t = vrsubhn_s64(b, c);
- simd_shuffle4!(a, x, [0, 1, 2, 3])
+ simd_shuffle!(a, x, [0, 1, 2, 3])
}
/// Rounding subtract returning high narrow
@@ -14932,7 +14932,7 @@ pub unsafe fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int3
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
let x: uint8x8_t = vrsubhn_u16(b, c);
- simd_shuffle16!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Rounding subtract returning high narrow
@@ -14944,7 +14944,7 @@ pub unsafe fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> ui
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
let x: uint16x4_t = vrsubhn_u32(b, c);
- simd_shuffle8!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Rounding subtract returning high narrow
@@ -14956,7 +14956,7 @@ pub unsafe fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> u
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
let x: uint32x2_t = vrsubhn_u64(b, c);
- simd_shuffle4!(a, x, [0, 1, 2, 3])
+ simd_shuffle!(a, x, [0, 1, 2, 3])
}
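
Note: one lane of the rounding-subtract-high-narrow family behaves roughly as: subtract modulo the element width, add half of the discarded range, keep the high half. Illustrative u64 model — truncating to u32 discards the carry, matching the mod-2^64 arithmetic:

    fn rsubhn_lane(b: u64, c: u64) -> u32 {
        ((b.wrapping_sub(c) as u128 + (1u128 << 31)) >> 32) as u32
    }
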
/// Insert vector element from another vector element
@@ -14968,7 +14968,7 @@ pub unsafe fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> u
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(b, LANE as u32, a)
}
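
Note: float64x1_t has exactly one lane, hence the LANE == 0 assertion rather than a bit-width check. Usage sketch (replace_only_lane is illustrative):

    #[cfg(target_arch = "aarch64")]
    #[target_feature(enable = "neon")]
    unsafe fn replace_only_lane(
        a: f64,
        b: core::arch::aarch64::float64x1_t,
    ) -> core::arch::aarch64::float64x1_t {
        // Any LANE other than 0 fails the static assert at compile time.
        core::arch::aarch64::vset_lane_f64::<0>(a, b)
    }
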
@@ -14981,7 +14981,7 @@ pub unsafe fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -15016,8 +15016,8 @@ pub unsafe fn vshld_u64(a: u64, b: i64) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 0 && N <= 8);
- let b: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert!(N >= 0 && N <= 8);
+ let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
vshll_n_s8::<N>(b)
}
@@ -15030,8 +15030,8 @@ pub unsafe fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 0 && N <= 16);
- let b: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ static_assert!(N >= 0 && N <= 16);
+ let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
vshll_n_s16::<N>(b)
}
@@ -15044,8 +15044,8 @@ pub unsafe fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 0 && N <= 32);
- let b: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ static_assert!(N >= 0 && N <= 32);
+ let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
vshll_n_s32::<N>(b)
}
@@ -15058,8 +15058,8 @@ pub unsafe fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 0 && N <= 8);
- let b: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ static_assert!(N >= 0 && N <= 8);
+ let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
vshll_n_u8::<N>(b)
}
@@ -15072,8 +15072,8 @@ pub unsafe fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 0 && N <= 16);
- let b: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
+ static_assert!(N >= 0 && N <= 16);
+ let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
vshll_n_u16::<N>(b)
}
@@ -15086,8 +15086,8 @@ pub unsafe fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 0 && N <= 32);
- let b: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
+ static_assert!(N >= 0 && N <= 32);
+ let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
vshll_n_u32::<N>(b)
}
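
Note: the vshll_high_* pattern above is "shuffle out the high half, then widen-shift it". On scalars (illustrative shll_high):

    // Take lanes 8..16, widen each i8 to i16, shift left by n (0..=8).
    fn shll_high(a: [i8; 16], n: u32) -> [i16; 8] {
        core::array::from_fn(|i| (a[8 + i] as i16) << n)
    }
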
@@ -15100,8 +15100,8 @@ pub unsafe fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vshrn_n_s16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Shift right narrow
@@ -15113,8 +15113,8 @@ pub unsafe fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Shift right narrow
@@ -15126,8 +15126,8 @@ pub unsafe fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int1
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3])
}
/// Shift right narrow
@@ -15139,8 +15139,8 @@ pub unsafe fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int3
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
- simd_shuffle16!(a, vshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ static_assert!(N >= 1 && N <= 8);
+ simd_shuffle!(a, vshrn_n_u16::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Shift right narrow
@@ -15152,8 +15152,8 @@ pub unsafe fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uin
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
- simd_shuffle8!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
+ static_assert!(N >= 1 && N <= 16);
+ simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Shift right narrow
@@ -15165,8 +15165,8 @@ pub unsafe fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> ui
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
- simd_shuffle4!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3])
+ static_assert!(N >= 1 && N <= 32);
+ simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3])
}
/// SM3PARTW1
@@ -15447,7 +15447,7 @@ pub unsafe fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
+ simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
/// Transpose vectors
@@ -15458,7 +15458,7 @@ pub unsafe fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
+ simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}
/// Transpose vectors
@@ -15469,7 +15469,7 @@ pub unsafe fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Transpose vectors
@@ -15480,7 +15480,7 @@ pub unsafe fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
+ simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
/// Transpose vectors
@@ -15491,7 +15491,7 @@ pub unsafe fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Transpose vectors
@@ -15502,7 +15502,7 @@ pub unsafe fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
+ simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
/// Transpose vectors
@@ -15513,7 +15513,7 @@ pub unsafe fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
+ simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}
/// Transpose vectors
@@ -15524,7 +15524,7 @@ pub unsafe fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Transpose vectors
@@ -15535,7 +15535,7 @@ pub unsafe fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
+ simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
/// Transpose vectors
@@ -15546,7 +15546,7 @@ pub unsafe fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Transpose vectors
@@ -15557,7 +15557,7 @@ pub unsafe fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
+ simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
/// Transpose vectors
@@ -15568,7 +15568,7 @@ pub unsafe fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
+ simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30])
}
/// Transpose vectors
@@ -15579,7 +15579,7 @@ pub unsafe fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Transpose vectors
@@ -15590,7 +15590,7 @@ pub unsafe fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
+ simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14])
}
/// Transpose vectors
@@ -15601,7 +15601,7 @@ pub unsafe fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Transpose vectors
@@ -15612,7 +15612,7 @@ pub unsafe fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Transpose vectors
@@ -15623,7 +15623,7 @@ pub unsafe fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Transpose vectors
@@ -15634,7 +15634,7 @@ pub unsafe fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Transpose vectors
@@ -15645,7 +15645,7 @@ pub unsafe fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Transpose vectors
@@ -15656,7 +15656,7 @@ pub unsafe fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[cfg_attr(test, assert_instr(trn1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Transpose vectors
@@ -15667,7 +15667,7 @@ pub unsafe fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Transpose vectors
@@ -15678,7 +15678,7 @@ pub unsafe fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
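
Note: every trn1 shuffle above follows one pattern — even-indexed lanes of `a` interleaved with even-indexed lanes of `b` — and the 2-lane forms degenerate to zip1, matching the assert_instr(zip1) annotations. For 8 lanes, [0, 8, 2, 10, 4, 12, 6, 14] over the concatenation of `a` and `b` reads as:

    fn trn1(a: [i8; 8], b: [i8; 8]) -> [i8; 8] {
        [a[0], b[0], a[2], b[2], a[4], b[4], a[6], b[6]]
    }
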
/// Transpose vectors
@@ -15689,7 +15689,7 @@ pub unsafe fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
+ simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
/// Transpose vectors
@@ -15700,7 +15700,7 @@ pub unsafe fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
+ simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
}
/// Transpose vectors
@@ -15711,7 +15711,7 @@ pub unsafe fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Transpose vectors
@@ -15722,7 +15722,7 @@ pub unsafe fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
+ simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
/// Transpose vectors
@@ -15733,7 +15733,7 @@ pub unsafe fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Transpose vectors
@@ -15744,7 +15744,7 @@ pub unsafe fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
+ simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
/// Transpose vectors
@@ -15755,7 +15755,7 @@ pub unsafe fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
+ simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
}
/// Transpose vectors
@@ -15766,7 +15766,7 @@ pub unsafe fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Transpose vectors
@@ -15777,7 +15777,7 @@ pub unsafe fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
+ simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
/// Transpose vectors
@@ -15788,7 +15788,7 @@ pub unsafe fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Transpose vectors
@@ -15799,7 +15799,7 @@ pub unsafe fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
+ simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
/// Transpose vectors
@@ -15810,7 +15810,7 @@ pub unsafe fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
+ simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31])
}
/// Transpose vectors
@@ -15821,7 +15821,7 @@ pub unsafe fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Transpose vectors
@@ -15832,7 +15832,7 @@ pub unsafe fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
+ simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15])
}
/// Transpose vectors
@@ -15843,7 +15843,7 @@ pub unsafe fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Transpose vectors
@@ -15854,7 +15854,7 @@ pub unsafe fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Transpose vectors
@@ -15865,7 +15865,7 @@ pub unsafe fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Transpose vectors
@@ -15876,7 +15876,7 @@ pub unsafe fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Transpose vectors
@@ -15887,7 +15887,7 @@ pub unsafe fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Transpose vectors
@@ -15898,7 +15898,7 @@ pub unsafe fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[cfg_attr(test, assert_instr(trn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Transpose vectors
@@ -15909,7 +15909,7 @@ pub unsafe fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Transpose vectors
@@ -15920,7 +15920,7 @@ pub unsafe fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
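For reference while reviewing these index arrays: trn2 keeps the odd-numbered lanes of both inputs, interleaved. A minimal stand-alone sketch on plain arrays (this models the shuffle semantics only; it is not the internal simd_shuffle! macro):

fn trn2_4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    // Indices 0..4 in the shuffle arrays select from `a`, 4..8 from `b`,
    // so [1, 5, 3, 7] means [a[1], b[1], a[3], b[3]].
    [a[1], b[1], a[3], b[3]]
}

fn main() {
    // With inputs numbered 0..8, the output equals the index array itself.
    assert_eq!(trn2_4([0, 1, 2, 3], [4, 5, 6, 7]), [1, 5, 3, 7]);
}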
/// Zip vectors
@@ -15931,7 +15931,7 @@ pub unsafe fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
/// Zip vectors
@@ -15942,7 +15942,7 @@ pub unsafe fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
+ simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}
/// Zip vectors
@@ -15953,7 +15953,7 @@ pub unsafe fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
/// Zip vectors
@@ -15964,7 +15964,7 @@ pub unsafe fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
/// Zip vectors
@@ -15975,7 +15975,7 @@ pub unsafe fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -15986,7 +15986,7 @@ pub unsafe fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
/// Zip vectors
@@ -15997,7 +15997,7 @@ pub unsafe fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -16008,7 +16008,7 @@ pub unsafe fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
/// Zip vectors
@@ -16019,7 +16019,7 @@ pub unsafe fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
+ simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}
/// Zip vectors
@@ -16030,7 +16030,7 @@ pub unsafe fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
/// Zip vectors
@@ -16041,7 +16041,7 @@ pub unsafe fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
/// Zip vectors
@@ -16052,7 +16052,7 @@ pub unsafe fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -16063,7 +16063,7 @@ pub unsafe fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
/// Zip vectors
@@ -16074,7 +16074,7 @@ pub unsafe fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -16085,7 +16085,7 @@ pub unsafe fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
/// Zip vectors
@@ -16096,7 +16096,7 @@ pub unsafe fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
+ simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23])
}
/// Zip vectors
@@ -16107,7 +16107,7 @@ pub unsafe fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
/// Zip vectors
@@ -16118,7 +16118,7 @@ pub unsafe fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11])
}
/// Zip vectors
@@ -16129,7 +16129,7 @@ pub unsafe fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -16140,7 +16140,7 @@ pub unsafe fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -16151,7 +16151,7 @@ pub unsafe fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
/// Zip vectors
@@ -16162,7 +16162,7 @@ pub unsafe fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Zip vectors
@@ -16173,7 +16173,7 @@ pub unsafe fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
/// Zip vectors
@@ -16184,7 +16184,7 @@ pub unsafe fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
+ simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
}
/// Zip vectors
@@ -16195,7 +16195,7 @@ pub unsafe fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Zip vectors
@@ -16206,7 +16206,7 @@ pub unsafe fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
/// Zip vectors
@@ -16217,7 +16217,7 @@ pub unsafe fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Zip vectors
@@ -16228,7 +16228,7 @@ pub unsafe fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Zip vectors
@@ -16239,7 +16239,7 @@ pub unsafe fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Zip vectors
@@ -16250,7 +16250,7 @@ pub unsafe fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
/// Zip vectors
@@ -16261,7 +16261,7 @@ pub unsafe fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
+ simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
}
/// Zip vectors
@@ -16272,7 +16272,7 @@ pub unsafe fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Zip vectors
@@ -16283,7 +16283,7 @@ pub unsafe fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
/// Zip vectors
@@ -16294,7 +16294,7 @@ pub unsafe fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Zip vectors
@@ -16305,7 +16305,7 @@ pub unsafe fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Zip vectors
@@ -16316,7 +16316,7 @@ pub unsafe fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Zip vectors
@@ -16327,7 +16327,7 @@ pub unsafe fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
/// Zip vectors
@@ -16338,7 +16338,7 @@ pub unsafe fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
+ simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31])
}
/// Zip vectors
@@ -16349,7 +16349,7 @@ pub unsafe fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Zip vectors
@@ -16360,7 +16360,7 @@ pub unsafe fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15])
}
/// Zip vectors
@@ -16371,7 +16371,7 @@ pub unsafe fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Zip vectors
@@ -16382,7 +16382,7 @@ pub unsafe fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Zip vectors
@@ -16393,7 +16393,7 @@ pub unsafe fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Zip vectors
@@ -16404,7 +16404,7 @@ pub unsafe fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
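The zip index arrays follow the same convention: zip1 interleaves the low halves of the two inputs, zip2 the high halves. A plain-array sketch of the 4-lane case (a model of the shuffles above, not the intrinsics themselves):

fn zip1_4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    [a[0], b[0], a[1], b[1]] // low halves, interleaved: indices [0, 4, 1, 5]
}

fn zip2_4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    [a[2], b[2], a[3], b[3]] // high halves, interleaved: indices [2, 6, 3, 7]
}

fn main() {
    assert_eq!(zip1_4([0, 1, 2, 3], [4, 5, 6, 7]), [0, 4, 1, 5]);
    assert_eq!(zip2_4([0, 1, 2, 3], [4, 5, 6, 7]), [2, 6, 3, 7]);
}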
/// Unzip vectors
@@ -16415,7 +16415,7 @@ pub unsafe fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
/// Unzip vectors
@@ -16426,7 +16426,7 @@ pub unsafe fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
}
/// Unzip vectors
@@ -16437,7 +16437,7 @@ pub unsafe fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, b, [0, 2, 4, 6])
+ simd_shuffle!(a, b, [0, 2, 4, 6])
}
/// Unzip vectors
@@ -16448,7 +16448,7 @@ pub unsafe fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
/// Unzip vectors
@@ -16459,7 +16459,7 @@ pub unsafe fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, b, [0, 2, 4, 6])
+ simd_shuffle!(a, b, [0, 2, 4, 6])
}
/// Unzip vectors
@@ -16470,7 +16470,7 @@ pub unsafe fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
/// Unzip vectors
@@ -16481,7 +16481,7 @@ pub unsafe fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
}
/// Unzip vectors
@@ -16492,7 +16492,7 @@ pub unsafe fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, b, [0, 2, 4, 6])
+ simd_shuffle!(a, b, [0, 2, 4, 6])
}
/// Unzip vectors
@@ -16503,7 +16503,7 @@ pub unsafe fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
/// Unzip vectors
@@ -16514,7 +16514,7 @@ pub unsafe fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, b, [0, 2, 4, 6])
+ simd_shuffle!(a, b, [0, 2, 4, 6])
}
/// Unzip vectors
@@ -16525,7 +16525,7 @@ pub unsafe fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
/// Unzip vectors
@@ -16536,7 +16536,7 @@ pub unsafe fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
}
/// Unzip vectors
@@ -16547,7 +16547,7 @@ pub unsafe fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, b, [0, 2, 4, 6])
+ simd_shuffle!(a, b, [0, 2, 4, 6])
}
/// Unzip vectors
@@ -16558,7 +16558,7 @@ pub unsafe fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
+ simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14])
}
/// Unzip vectors
@@ -16569,7 +16569,7 @@ pub unsafe fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16580,7 +16580,7 @@ pub unsafe fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16591,7 +16591,7 @@ pub unsafe fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16602,7 +16602,7 @@ pub unsafe fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16613,7 +16613,7 @@ pub unsafe fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16624,7 +16624,7 @@ pub unsafe fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[cfg_attr(test, assert_instr(uzp1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, b, [0, 2, 4, 6])
+ simd_shuffle!(a, b, [0, 2, 4, 6])
}
/// Unzip vectors
@@ -16635,7 +16635,7 @@ pub unsafe fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16646,7 +16646,7 @@ pub unsafe fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(zip1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
/// Unzip vectors
@@ -16657,7 +16657,7 @@ pub unsafe fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
/// Unzip vectors
@@ -16668,7 +16668,7 @@ pub unsafe fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
}
/// Unzip vectors
@@ -16679,7 +16679,7 @@ pub unsafe fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, b, [1, 3, 5, 7])
+ simd_shuffle!(a, b, [1, 3, 5, 7])
}
/// Unzip vectors
@@ -16690,7 +16690,7 @@ pub unsafe fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
/// Unzip vectors
@@ -16701,7 +16701,7 @@ pub unsafe fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, b, [1, 3, 5, 7])
+ simd_shuffle!(a, b, [1, 3, 5, 7])
}
/// Unzip vectors
@@ -16712,7 +16712,7 @@ pub unsafe fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
/// Unzip vectors
@@ -16723,7 +16723,7 @@ pub unsafe fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
}
/// Unzip vectors
@@ -16734,7 +16734,7 @@ pub unsafe fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, b, [1, 3, 5, 7])
+ simd_shuffle!(a, b, [1, 3, 5, 7])
}
/// Unzip vectors
@@ -16745,7 +16745,7 @@ pub unsafe fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
/// Unzip vectors
@@ -16756,7 +16756,7 @@ pub unsafe fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, b, [1, 3, 5, 7])
+ simd_shuffle!(a, b, [1, 3, 5, 7])
}
/// Unzip vectors
@@ -16767,7 +16767,7 @@ pub unsafe fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
/// Unzip vectors
@@ -16778,7 +16778,7 @@ pub unsafe fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31])
}
/// Unzip vectors
@@ -16789,7 +16789,7 @@ pub unsafe fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, b, [1, 3, 5, 7])
+ simd_shuffle!(a, b, [1, 3, 5, 7])
}
/// Unzip vectors
@@ -16800,7 +16800,7 @@ pub unsafe fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
+ simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15])
}
/// Unzip vectors
@@ -16811,7 +16811,7 @@ pub unsafe fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Unzip vectors
@@ -16822,7 +16822,7 @@ pub unsafe fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Unzip vectors
@@ -16833,7 +16833,7 @@ pub unsafe fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Unzip vectors
@@ -16844,7 +16844,7 @@ pub unsafe fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Unzip vectors
@@ -16855,7 +16855,7 @@ pub unsafe fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Unzip vectors
@@ -16866,7 +16866,7 @@ pub unsafe fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
#[cfg_attr(test, assert_instr(uzp2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, b, [1, 3, 5, 7])
+ simd_shuffle!(a, b, [1, 3, 5, 7])
}
/// Unzip vectors
@@ -16877,7 +16877,7 @@ pub unsafe fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// Unzip vectors
@@ -16888,7 +16888,7 @@ pub unsafe fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
#[cfg_attr(test, assert_instr(zip2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
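Unzip is the inverse of zip: uzp1 gathers the even-indexed lanes of the concatenation a ++ b, uzp2 the odd-indexed lanes. Sketched on plain arrays for the 4-lane case (a model of the shuffles, not the intrinsics):

fn uzp1_4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    [a[0], a[2], b[0], b[2]] // even lanes of a ++ b: indices [0, 2, 4, 6]
}

fn uzp2_4(a: [u16; 4], b: [u16; 4]) -> [u16; 4] {
    [a[1], a[3], b[1], b[3]] // odd lanes of a ++ b: indices [1, 3, 5, 7]
}

fn main() {
    assert_eq!(uzp1_4([0, 1, 2, 3], [4, 5, 6, 7]), [0, 2, 4, 6]);
    assert_eq!(uzp2_4([0, 1, 2, 3], [4, 5, 6, 7]), [1, 3, 5, 7]);
}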
/// Unsigned Absolute difference and Accumulate Long
@@ -16899,8 +16899,8 @@ pub unsafe fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[cfg_attr(test, assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
- let d: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
- let e: uint8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
let f: uint8x8_t = vabd_u8(d, e);
simd_add(a, simd_cast(f))
}
@@ -16913,8 +16913,8 @@ pub unsafe fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint
#[cfg_attr(test, assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
- let d: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
- let e: uint16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]);
+ let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
+ let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
let f: uint16x4_t = vabd_u16(d, e);
simd_add(a, simd_cast(f))
}
@@ -16927,8 +16927,8 @@ pub unsafe fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uin
#[cfg_attr(test, assert_instr(uabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
- let d: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
- let e: uint32x2_t = simd_shuffle2!(c, c, [2, 3]);
+ let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
+ let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
let f: uint32x2_t = vabd_u32(d, e);
simd_add(a, simd_cast(f))
}
@@ -16941,8 +16941,8 @@ pub unsafe fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uin
#[cfg_attr(test, assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
- let d: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
- let e: int8x8_t = simd_shuffle8!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
let f: int8x8_t = vabd_s8(d, e);
let f: uint8x8_t = simd_cast(f);
simd_add(a, simd_cast(f))
@@ -16956,8 +16956,8 @@ pub unsafe fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8
#[cfg_attr(test, assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
- let d: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
- let e: int16x4_t = simd_shuffle4!(c, c, [4, 5, 6, 7]);
+ let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
+ let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
let f: int16x4_t = vabd_s16(d, e);
let f: uint16x4_t = simd_cast(f);
simd_add(a, simd_cast(f))
@@ -16971,14 +16971,14 @@ pub unsafe fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x
#[cfg_attr(test, assert_instr(sabal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
- let d: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
- let e: int32x2_t = simd_shuffle2!(c, c, [2, 3]);
+ let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
+ let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
let f: int32x2_t = vabd_s32(d, e);
let f: uint32x2_t = simd_cast(f);
simd_add(a, simd_cast(f))
}
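The vabal_high bodies above all follow one pattern: shuffle out the high half of each wide input, take the lane-wise absolute difference, widen, and accumulate. A scalar sketch of the u8 case (a model of the semantics, not the intrinsic itself):

fn abal_high_u8(a: [u16; 8], b: [u8; 16], c: [u8; 16]) -> [u16; 8] {
    let mut r = a;
    for i in 0..8 {
        // High half of b and c: lanes 8..16, i.e. the [8, 9, ..., 15] shuffle.
        // Widen the absolute difference to u16; accumulation wraps like simd_add.
        r[i] = r[i].wrapping_add(b[8 + i].abs_diff(c[8 + i]) as u16);
    }
    r
}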
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)
#[inline]
@@ -16994,7 +16994,7 @@ pub unsafe fn vqabs_s64(a: int64x1_t) -> int64x1_t {
vqabs_s64_(a)
}
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)
#[inline]
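Apart from the doc-comment typo fix, nothing changes in these vqabs hunks. The operation itself is a saturating absolute value, which on i64 differs from a plain abs only at i64::MIN; a one-line scalar model:

fn qabs_s64(a: i64) -> i64 {
    // |i64::MIN| is not representable in i64, so it saturates to i64::MAX.
    a.saturating_abs()
}

fn main() {
    assert_eq!(qabs_s64(i64::MIN), i64::MAX);
    assert_eq!(qabs_s64(-7), 7);
}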
@@ -17073,7 +17073,7 @@ pub unsafe fn vqabsd_s64(a: i64) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
- static_assert!(N : i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
transmute(vsli_n_s64::<N>(transmute(a), transmute(b)))
}
@@ -17086,7 +17086,7 @@ pub unsafe fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
- static_assert!(N : i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
transmute(vsli_n_u64::<N>(transmute(a), transmute(b)))
}
@@ -17099,7 +17099,7 @@ pub unsafe fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
transmute(vsri_n_s64::<N>(transmute(a), transmute(b)))
}
@@ -17112,7 +17112,7 @@ pub unsafe fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
transmute(vsri_n_u64::<N>(transmute(a), transmute(b)))
}
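The static_assert! calls above move from the old `N : i32 where <pred>` form to a plain boolean predicate. static_assert! is a stdarch-internal helper, but the same compile-time bounds check can be sketched on ordinary Rust with a const assertion (a hypothetical stand-in to show the idiom, not stdarch's actual macro):

macro_rules! static_assert {
    ($e:expr) => {
        const _: () = assert!($e); // evaluated at compile time
    };
}

const N: i32 = 63;
static_assert!(N >= 0 && N <= 63); // an out-of-range N fails the build

fn main() {}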
@@ -22346,7 +22346,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vmull_p64() {
let a: p64 = 15;
let b: p64 = 3;
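This attribute change matters because vmull_p64 is gated on the aes target feature, not bare neon; with only "neon" enabled, the test could run on hardware lacking PMULL. In user code the same requirement surfaces as a runtime check before the call (a sketch assuming AArch64 with std available):

#[cfg(target_arch = "aarch64")]
fn mull_p64_checked(a: u64, b: u64) -> Option<u128> {
    if std::arch::is_aarch64_feature_detected!("aes") {
        // Safety: the `aes` feature the intrinsic requires was just verified.
        Some(unsafe { std::arch::aarch64::vmull_p64(a, b) })
    } else {
        None
    }
}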
@@ -22364,7 +22364,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vmull_high_p64() {
let a: i64x2 = i64x2::new(1, 15);
let b: i64x2 = i64x2::new(1, 3);
@@ -23329,7 +23329,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcadd_rot270_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23338,7 +23338,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcaddq_rot270_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23347,7 +23347,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcaddq_rot270_f64() {
let a: f64x2 = f64x2::new(1., -1.);
let b: f64x2 = f64x2::new(-1., 1.);
@@ -23356,7 +23356,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcadd_rot90_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
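For the fcma tests, a scalar model of vcadd_rot90 helps check expected values: each pair of f32 lanes is one complex number (re, im), and the rot90 variant multiplies b by i before adding (an assumed scalar model of FCADD, not the intrinsic):

fn cadd_rot90(a: [f32; 2], b: [f32; 2]) -> [f32; 2] {
    // i * (re + i*im) = -im + i*re, so:
    [a[0] - b[1], a[1] + b[0]]
}

fn main() {
    // Using the a and b from test_vcadd_rot90_f32 above:
    assert_eq!(cadd_rot90([1., -1.], [-1., 1.]), [0., -2.]);
}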
@@ -23365,7 +23365,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcaddq_rot90_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23374,7 +23374,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcaddq_rot90_f64() {
let a: f64x2 = f64x2::new(1., -1.);
let b: f64x2 = f64x2::new(-1., 1.);
@@ -23383,7 +23383,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23393,7 +23393,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23403,7 +23403,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_f64() {
let a: f64x2 = f64x2::new(1., -1.);
let b: f64x2 = f64x2::new(-1., 1.);
@@ -23413,7 +23413,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot90_f32() {
let a: f32x2 = f32x2::new(1., 1.);
let b: f32x2 = f32x2::new(1., -1.);
@@ -23423,7 +23423,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot90_f32() {
let a: f32x4 = f32x4::new(1., 1., 1., 1.);
let b: f32x4 = f32x4::new(1., -1., 1., -1.);
@@ -23433,7 +23433,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot90_f64() {
let a: f64x2 = f64x2::new(1., 1.);
let b: f64x2 = f64x2::new(1., -1.);
@@ -23443,7 +23443,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot180_f32() {
let a: f32x2 = f32x2::new(1., 1.);
let b: f32x2 = f32x2::new(1., -1.);
@@ -23453,7 +23453,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot180_f32() {
let a: f32x4 = f32x4::new(1., 1., 1., 1.);
let b: f32x4 = f32x4::new(1., -1., 1., -1.);
@@ -23463,7 +23463,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot180_f64() {
let a: f64x2 = f64x2::new(1., 1.);
let b: f64x2 = f64x2::new(1., -1.);
@@ -23473,7 +23473,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot270_f32() {
let a: f32x2 = f32x2::new(1., 1.);
let b: f32x2 = f32x2::new(1., -1.);
@@ -23483,7 +23483,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot270_f32() {
let a: f32x4 = f32x4::new(1., 1., 1., 1.);
let b: f32x4 = f32x4::new(1., -1., 1., -1.);
@@ -23493,7 +23493,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot270_f64() {
let a: f64x2 = f64x2::new(1., 1.);
let b: f64x2 = f64x2::new(1., -1.);
@@ -23503,7 +23503,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_lane_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23513,7 +23513,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_laneq_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23523,7 +23523,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_lane_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23533,7 +23533,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_laneq_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23543,7 +23543,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot90_lane_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23553,7 +23553,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot90_laneq_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23563,7 +23563,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot90_lane_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23573,7 +23573,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot90_laneq_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23583,7 +23583,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot180_lane_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23593,7 +23593,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot180_laneq_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23603,7 +23603,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot180_lane_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23613,7 +23613,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot180_laneq_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23623,7 +23623,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot270_lane_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23633,7 +23633,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmla_rot270_laneq_f32() {
let a: f32x2 = f32x2::new(1., -1.);
let b: f32x2 = f32x2::new(-1., 1.);
@@ -23643,7 +23643,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot270_lane_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23653,7 +23653,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,fcma")]
unsafe fn test_vcmlaq_rot270_laneq_f32() {
let a: f32x4 = f32x4::new(1., -1., 1., -1.);
let b: f32x4 = f32x4::new(-1., 1., -1., 1.);
@@ -23663,7 +23663,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
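The dot-product tests likewise need their own feature. What vdot_s32 computes, as a scalar sketch: each 32-bit accumulator lane adds the dot product of its group of four i8 lanes (a model of SDOT; the value of c here is an illustrative choice, not the test's):

fn dot_s32(a: [i32; 2], b: [i8; 8], c: [i8; 8]) -> [i32; 2] {
    let mut r = a;
    for lane in 0..2 {
        for k in 0..4 {
            r[lane] += b[4 * lane + k] as i32 * c[4 * lane + k] as i32;
        }
    }
    r
}

fn main() {
    // With the a and b from test_vdot_s32 above and c = all ones:
    assert_eq!(dot_s32([1, 2], [1, 2, 3, 4, 5, 6, 7, 8], [1; 8]), [11, 28]);
}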
@@ -23673,7 +23673,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_s32() {
let a: i32x4 = i32x4::new(1, 2, 1, 2);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
@@ -23683,7 +23683,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
@@ -23693,7 +23693,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_u32() {
let a: u32x4 = u32x4::new(1, 2, 1, 2);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
@@ -23703,7 +23703,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_lane_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
@@ -23713,7 +23713,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_laneq_s32() {
let a: i32x2 = i32x2::new(1, 2);
let b: i8x8 = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
@@ -23723,7 +23723,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_lane_s32() {
let a: i32x4 = i32x4::new(1, 2, 1, 2);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
@@ -23733,7 +23733,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_laneq_s32() {
let a: i32x4 = i32x4::new(1, 2, 1, 2);
let b: i8x16 = i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
@@ -23743,7 +23743,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_lane_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
@@ -23753,7 +23753,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdot_laneq_u32() {
let a: u32x2 = u32x2::new(1, 2);
let b: u8x8 = u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
@@ -23763,7 +23763,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_lane_u32() {
let a: u32x4 = u32x4::new(1, 2, 1, 2);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
@@ -23773,7 +23773,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,dotprod")]
unsafe fn test_vdotq_laneq_u32() {
let a: u32x4 = u32x4::new(1, 2, 1, 2);
let b: u8x16 = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
@@ -24864,7 +24864,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlah_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -24874,7 +24874,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -24884,7 +24884,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlah_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -24894,7 +24894,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -24904,7 +24904,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahh_s16() {
let a: i16 = 1;
let b: i16 = 1;
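These tests exercise SQRDMLAH, which exists only with the rdm feature (ARMv8.1), hence the attribute change. A scalar model of the i16 form following the ARM pseudocode, ((a << 16) + 2*b*c + rounding) >> 16 with a final saturation (an approximation; edge-case saturation behavior is not verified against the intrinsic here):

fn qrdmlah_h(a: i16, b: i16, c: i16) -> i16 {
    // i64 keeps the doubled product and the shifted-in accumulator exact.
    let sum = ((a as i64) << 16) + 2 * (b as i64) * (c as i64) + (1 << 15);
    (sum >> 16).clamp(i16::MIN as i64, i16::MAX as i64) as i16
}

fn main() {
    assert_eq!(qrdmlah_h(1, 1, 1), 1); // rounding term alone does not carry
}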
@@ -24914,7 +24914,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahs_s32() {
let a: i32 = 1;
let b: i32 = 1;
@@ -24924,7 +24924,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlah_lane_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -24934,7 +24934,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlah_laneq_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -24944,7 +24944,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahq_lane_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -24954,7 +24954,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahq_laneq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -24964,7 +24964,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlah_lane_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -24974,7 +24974,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlah_laneq_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -24984,7 +24984,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahq_lane_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -24994,7 +24994,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahq_laneq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25004,7 +25004,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahh_lane_s16() {
let a: i16 = 1;
let b: i16 = 1;
@@ -25014,7 +25014,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahh_laneq_s16() {
let a: i16 = 1;
let b: i16 = 1;
@@ -25024,7 +25024,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahs_lane_s32() {
let a: i32 = 1;
let b: i32 = 1;
@@ -25034,7 +25034,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlahs_laneq_s32() {
let a: i32 = 1;
let b: i32 = 1;
@@ -25044,7 +25044,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlsh_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -25054,7 +25054,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -25064,7 +25064,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlsh_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25074,7 +25074,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25084,7 +25084,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshh_s16() {
let a: i16 = 1;
let b: i16 = 1;
@@ -25094,7 +25094,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshs_s32() {
let a: i32 = 1;
let b: i32 = 1;
@@ -25104,7 +25104,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlsh_lane_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -25114,7 +25114,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlsh_laneq_s16() {
let a: i16x4 = i16x4::new(1, 1, 1, 1);
let b: i16x4 = i16x4::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -25124,7 +25124,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshq_lane_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -25134,7 +25134,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshq_laneq_s16() {
let a: i16x8 = i16x8::new(1, 1, 1, 1, 1, 1, 1, 1);
let b: i16x8 = i16x8::new(0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF, 0x7F_FF);
@@ -25144,7 +25144,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlsh_lane_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25154,7 +25154,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlsh_laneq_s32() {
let a: i32x2 = i32x2::new(1, 1);
let b: i32x2 = i32x2::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25164,7 +25164,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshq_lane_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25174,7 +25174,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshq_laneq_s32() {
let a: i32x4 = i32x4::new(1, 1, 1, 1);
let b: i32x4 = i32x4::new(0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF, 0x7F_FF_FF_FF);
@@ -25184,7 +25184,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshh_lane_s16() {
let a: i16 = 1;
let b: i16 = 1;
@@ -25194,7 +25194,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshh_laneq_s16() {
let a: i16 = 1;
let b: i16 = 1;
@@ -25204,7 +25204,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshs_lane_s32() {
let a: i32 = 1;
let b: i32 = 1;
@@ -25214,7 +25214,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "rdm")]
unsafe fn test_vqrdmlshs_laneq_s32() {
let a: i32 = 1;
let b: i32 = 1;
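
The vqrdmlah/vqrdmlsh tests above now enable the "rdm" feature rather than plain "neon": SQRDMLAH and SQRDMLSH belong to the ARMv8.1 FEAT_RDM extension, and executing them on a core that only implements baseline NEON would fault. A minimal caller-side sketch of the same guard, assuming an aarch64 toolchain on which these intrinsics are available (they were nightly-only at the time of this change):

#[cfg(target_arch = "aarch64")]
fn rounding_mla_demo() {
    // Runtime check mirrors the compile-time `enable = "rdm"` gate.
    if std::arch::is_aarch64_feature_detected!("rdm") {
        use core::arch::aarch64::*;
        unsafe {
            let a = vdupq_n_s16(1);
            let b = vdupq_n_s16(i16::MAX);
            let c = vdupq_n_s16(2);
            // Saturating rounding doubling multiply-accumulate per lane.
            let _r: int16x8_t = vqrdmlahq_s16(a, b, c);
        }
    }
}
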
diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
index 7ff26ac21..850657033 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs
@@ -373,8 +373,8 @@ pub unsafe fn vcopy_lane_s64<const N1: i32, const N2: i32>(
_a: int64x1_t,
b: int64x1_t,
) -> int64x1_t {
- static_assert!(N1 : i32 where N1 == 0);
- static_assert!(N2 : i32 where N2 == 0);
+ static_assert!(N1 == 0);
+ static_assert!(N2 == 0);
b
}
@@ -388,8 +388,8 @@ pub unsafe fn vcopy_lane_u64<const N1: i32, const N2: i32>(
_a: uint64x1_t,
b: uint64x1_t,
) -> uint64x1_t {
- static_assert!(N1 : i32 where N1 == 0);
- static_assert!(N2 : i32 where N2 == 0);
+ static_assert!(N1 == 0);
+ static_assert!(N2 == 0);
b
}
@@ -403,8 +403,8 @@ pub unsafe fn vcopy_lane_p64<const N1: i32, const N2: i32>(
_a: poly64x1_t,
b: poly64x1_t,
) -> poly64x1_t {
- static_assert!(N1 : i32 where N1 == 0);
- static_assert!(N2 : i32 where N2 == 0);
+ static_assert!(N1 == 0);
+ static_assert!(N2 == 0);
b
}
@@ -418,8 +418,8 @@ pub unsafe fn vcopy_lane_f64<const N1: i32, const N2: i32>(
_a: float64x1_t,
b: float64x1_t,
) -> float64x1_t {
- static_assert!(N1 : i32 where N1 == 0);
- static_assert!(N2 : i32 where N2 == 0);
+ static_assert!(N1 == 0);
+ static_assert!(N2 == 0);
b
}
@@ -433,8 +433,8 @@ pub unsafe fn vcopy_laneq_s64<const LANE1: i32, const LANE2: i32>(
_a: int64x1_t,
b: int64x2_t,
) -> int64x1_t {
- static_assert!(LANE1 : i32 where LANE1 == 0);
- static_assert_imm1!(LANE2);
+ static_assert!(LANE1 == 0);
+ static_assert_uimm_bits!(LANE2, 1);
transmute::<i64, _>(simd_extract(b, LANE2 as u32))
}
@@ -448,8 +448,8 @@ pub unsafe fn vcopy_laneq_u64<const LANE1: i32, const LANE2: i32>(
_a: uint64x1_t,
b: uint64x2_t,
) -> uint64x1_t {
- static_assert!(LANE1 : i32 where LANE1 == 0);
- static_assert_imm1!(LANE2);
+ static_assert!(LANE1 == 0);
+ static_assert_uimm_bits!(LANE2, 1);
transmute::<u64, _>(simd_extract(b, LANE2 as u32))
}
@@ -463,8 +463,8 @@ pub unsafe fn vcopy_laneq_p64<const LANE1: i32, const LANE2: i32>(
_a: poly64x1_t,
b: poly64x2_t,
) -> poly64x1_t {
- static_assert!(LANE1 : i32 where LANE1 == 0);
- static_assert_imm1!(LANE2);
+ static_assert!(LANE1 == 0);
+ static_assert_uimm_bits!(LANE2, 1);
transmute::<u64, _>(simd_extract(b, LANE2 as u32))
}
@@ -478,8 +478,8 @@ pub unsafe fn vcopy_laneq_f64<const LANE1: i32, const LANE2: i32>(
_a: float64x1_t,
b: float64x2_t,
) -> float64x1_t {
- static_assert!(LANE1 : i32 where LANE1 == 0);
- static_assert_imm1!(LANE2);
+ static_assert!(LANE1 == 0);
+ static_assert_uimm_bits!(LANE2, 1);
transmute::<f64, _>(simd_extract(b, LANE2 as u32))
}
@@ -737,7 +737,7 @@ pub unsafe fn vld1_dup_f64(ptr: *const f64) -> float64x1_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t {
let x = vld1q_lane_f64::<0>(ptr, transmute(f64x2::splat(0.)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure to one lane of one register.
@@ -747,7 +747,7 @@ pub unsafe fn vld1q_dup_f64(ptr: *const f64) -> float64x2_t {
#[cfg_attr(test, assert_instr(ldr, LANE = 0))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x1_t) -> float64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
@@ -758,7 +758,7 @@ pub unsafe fn vld1_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x1_t)
#[cfg_attr(test, assert_instr(ld1, LANE = 1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1950,7 +1950,7 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vext_p64<const N: i32>(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -1961,7 +1961,7 @@ pub unsafe fn vext_p64<const N: i32>(a: poly64x1_t, _b: poly64x1_t) -> poly64x1_
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vext_f64<const N: i32>(a: float64x1_t, _b: float64x1_t) -> float64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -2080,7 +2080,7 @@ pub unsafe fn vget_low_p64(a: poly64x2_t) -> poly64x1_t {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, IMM5 = 0))]
pub unsafe fn vget_lane_f64<const IMM5: i32>(v: float64x1_t) -> f64 {
- static_assert!(IMM5 : i32 where IMM5 == 0);
+ static_assert!(IMM5 == 0);
simd_extract(v, IMM5 as u32)
}
@@ -2091,7 +2091,7 @@ pub unsafe fn vget_lane_f64<const IMM5: i32>(v: float64x1_t) -> f64 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, IMM5 = 0))]
pub unsafe fn vgetq_lane_f64<const IMM5: i32>(v: float64x2_t) -> f64 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -2101,7 +2101,7 @@ pub unsafe fn vgetq_lane_f64<const IMM5: i32>(v: float64x2_t) -> f64 {
#[cfg_attr(test, assert_instr(mov))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_f64(low: float64x1_t, high: float64x1_t) -> float64x2_t {
- simd_shuffle2!(low, high, [0, 1])
+ simd_shuffle!(low, high, [0, 1])
}
/// Table look-up
@@ -3001,7 +3001,7 @@ pub unsafe fn vqtbx4q_p8(a: poly8x16_t, t: poly8x16x4_t, idx: uint8x16_t) -> pol
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_n_s64<const N: i32>(a: i64) -> i64 {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
a << N
}
@@ -3012,7 +3012,7 @@ pub unsafe fn vshld_n_s64<const N: i32>(a: i64) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshld_n_u64<const N: i32>(a: u64) -> u64 {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
a << N
}
@@ -3023,7 +3023,7 @@ pub unsafe fn vshld_n_u64<const N: i32>(a: u64) -> u64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrd_n_s64<const N: i32>(a: i64) -> i64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let n: i32 = if N == 64 { 63 } else { N };
a >> n
}
@@ -3035,7 +3035,7 @@ pub unsafe fn vshrd_n_s64<const N: i32>(a: i64) -> i64 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vshrd_n_u64<const N: i32>(a: u64) -> u64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let n: i32 = if N == 64 {
return 0;
} else {
@@ -3051,7 +3051,7 @@ pub unsafe fn vshrd_n_u64<const N: i32>(a: u64) -> u64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
a.wrapping_add(vshrd_n_s64::<N>(b))
}
@@ -3062,7 +3062,7 @@ pub unsafe fn vsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
a.wrapping_add(vshrd_n_u64::<N>(b))
}
@@ -3073,7 +3073,7 @@ pub unsafe fn vsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vsli_n_s8_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3083,7 +3083,7 @@ pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vsliq_n_s8_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3093,7 +3093,7 @@ pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
vsli_n_s16_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3103,7 +3103,7 @@ pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
vsliq_n_s16_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3113,7 +3113,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
vsli_n_s32_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3123,7 +3123,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
vsliq_n_s32_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3133,7 +3133,7 @@ pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
- static_assert!(N: i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
vsli_n_s64_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3143,7 +3143,7 @@ pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
vsliq_n_s64_(a, b, N)
}
/// Shift Left and Insert (immediate)
@@ -3153,7 +3153,7 @@ pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
transmute(vsli_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3163,7 +3163,7 @@ pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3173,7 +3173,7 @@ pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
transmute(vsli_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3183,7 +3183,7 @@ pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3193,7 +3193,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
transmute(vsli_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3203,7 +3203,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
transmute(vsliq_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3213,7 +3213,7 @@ pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
- static_assert!(N: i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
transmute(vsli_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3223,7 +3223,7 @@ pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
transmute(vsliq_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3233,7 +3233,7 @@ pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
transmute(vsli_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3243,7 +3243,7 @@ pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
transmute(vsliq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3253,7 +3253,7 @@ pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
transmute(vsli_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Left and Insert (immediate)
@@ -3263,7 +3263,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
transmute(vsliq_n_s16_(transmute(a), transmute(b), N))
}
@@ -3276,7 +3276,7 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
- static_assert!(N: i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
transmute(vsli_n_s64_(transmute(a), transmute(b), N))
}
@@ -3289,7 +3289,7 @@ pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 63);
+ static_assert!(N >= 0 && N <= 63);
transmute(vsliq_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3299,7 +3299,7 @@ pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert!(N: i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
vsri_n_s8_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3309,7 +3309,7 @@ pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert!(N: i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
vsriq_n_s8_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3319,7 +3319,7 @@ pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert!(N: i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
vsri_n_s16_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3329,7 +3329,7 @@ pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert!(N: i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
vsriq_n_s16_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3339,7 +3339,7 @@ pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert!(N: i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
vsri_n_s32_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3349,7 +3349,7 @@ pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert!(N: i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
vsriq_n_s32_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3359,7 +3359,7 @@ pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
- static_assert!(N: i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vsri_n_s64_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3369,7 +3369,7 @@ pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert!(N: i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vsriq_n_s64_(a, b, N)
}
/// Shift Right and Insert (immediate)
@@ -3379,7 +3379,7 @@ pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert!(N: i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
transmute(vsri_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3389,7 +3389,7 @@ pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert!(N: i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3399,7 +3399,7 @@ pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert!(N: i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
transmute(vsri_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3409,7 +3409,7 @@ pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert!(N: i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3419,7 +3419,7 @@ pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert!(N: i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
transmute(vsri_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3429,7 +3429,7 @@ pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert!(N: i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
transmute(vsriq_n_s32_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3439,7 +3439,7 @@ pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
- static_assert!(N: i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
transmute(vsri_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3449,7 +3449,7 @@ pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert!(N: i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3459,7 +3459,7 @@ pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- static_assert!(N: i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
transmute(vsri_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3469,7 +3469,7 @@ pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- static_assert!(N: i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
transmute(vsriq_n_s8_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3479,7 +3479,7 @@ pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- static_assert!(N: i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
transmute(vsri_n_s16_(transmute(a), transmute(b), N))
}
/// Shift Right and Insert (immediate)
@@ -3489,7 +3489,7 @@ pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- static_assert!(N: i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
transmute(vsriq_n_s16_(transmute(a), transmute(b), N))
}
@@ -3502,7 +3502,7 @@ pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
- static_assert!(N: i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
transmute(vsri_n_s64_(transmute(a), transmute(b), N))
}
@@ -3515,7 +3515,7 @@ pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- static_assert!(N: i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
transmute(vsriq_n_s64_(transmute(a), transmute(b), N))
}
@@ -3529,7 +3529,7 @@ pub unsafe fn vsm3tt1aq_u32<const IMM2: i32>(
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt1a")]
@@ -3548,7 +3548,7 @@ pub unsafe fn vsm3tt1bq_u32<const IMM2: i32>(
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt1b")]
@@ -3567,7 +3567,7 @@ pub unsafe fn vsm3tt2aq_u32<const IMM2: i32>(
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt2a")]
@@ -3586,7 +3586,7 @@ pub unsafe fn vsm3tt2bq_u32<const IMM2: i32>(
b: uint32x4_t,
c: uint32x4_t,
) -> uint32x4_t {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.sm3tt2b")]
@@ -3601,7 +3601,7 @@ pub unsafe fn vsm3tt2bq_u32<const IMM2: i32>(
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.crypto.xar")]
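
The change repeated throughout this file is purely syntactic: the bespoke `static_assert!(N : i32 where <cond>)` form becomes a plain boolean `static_assert!(<cond>)`, and the width-specific helpers (`static_assert_imm1!` through `static_assert_imm8!`) are folded into `static_assert_uimm_bits!(X, BITS)`, which requires X to fit in BITS unsigned bits. A sketch of the contract these macros enforce (not the stdarch implementation; an inline-const assert is just one way to get the check on a recent toolchain):

macro_rules! static_assert {
    ($e:expr) => {
        // Evaluated at compile time, once per instantiation.
        const { assert!($e) }
    };
}
macro_rules! static_assert_uimm_bits {
    ($imm:ident, $bits:expr) => {
        static_assert!($imm >= 0 && $imm < (1 << $bits))
    };
}

pub fn shl_n<const N: i32>(a: u64) -> u64 {
    static_assert_uimm_bits!(N, 6); // same bound as vshld_n_u64 above
    a << N
}

shl_n::<63> instantiates fine; shl_n::<64> is rejected at compile time, matching what the old static_assert_imm6!(N) enforced.
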
diff --git a/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs b/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs
index 3ae0ef506..0e2e39cc2 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/prefetch.rs
@@ -67,7 +67,7 @@ pub const _PREFETCH_LOCALITY3: i32 = 3;
// FIXME: Replace this with the standard ACLE __pld/__pldx/__pli/__plix intrinsics
pub unsafe fn _prefetch<const RW: i32, const LOCALITY: i32>(p: *const i8) {
// We use the `llvm.prefetch` intrinsic with `cache type` = 1 (data cache).
- static_assert_imm1!(RW);
- static_assert_imm2!(LOCALITY);
+ static_assert_uimm_bits!(RW, 1);
+ static_assert_uimm_bits!(LOCALITY, 2);
prefetch(p, RW, LOCALITY, 1);
}
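
The _prefetch wrapper gets the same treatment: RW must fit in one bit (read vs. write) and LOCALITY in two (levels 0 through 3). A hypothetical call site, using the constants defined earlier in this file (`buf` stands for any slice here; the API itself is nightly-only):

// Prefetch for reading with maximal temporal locality; both const
// arguments are validated by the static_assert_uimm_bits! checks above.
unsafe {
    _prefetch::<_PREFETCH_READ, _PREFETCH_LOCALITY3>(buf.as_ptr() as *const i8);
}
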
diff --git a/library/stdarch/crates/core_arch/src/aarch64/tme.rs b/library/stdarch/crates/core_arch/src/aarch64/tme.rs
index d1b2cf334..05df313e4 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/tme.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/tme.rs
@@ -96,7 +96,7 @@ pub unsafe fn __tcommit() {
#[cfg_attr(test, assert_instr(tcancel, IMM16 = 0x0))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __tcancel<const IMM16: u64>() {
- static_assert!(IMM16: u64 where IMM16 <= 65535);
+ static_assert!(IMM16 <= 65535);
aarch64_tcancel(IMM16);
}
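
__tcancel's immediate is a 16-bit, user-chosen cancellation reason, hence the IMM16 <= 65535 bound. A sketch of a transactional region (TME is a nightly-only aarch64 extension; `should_cancel` is a placeholder condition, and per its documentation __tstart returns 0 when the transaction starts successfully):

unsafe {
    if __tstart() == 0 {
        // ...transactional body...
        if should_cancel {
            __tcancel::<0x1234>(); // reason code; must fit in 16 bits
        } else {
            __tcommit();
        }
    }
}
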
diff --git a/library/stdarch/crates/core_arch/src/arm/armclang.rs b/library/stdarch/crates/core_arch/src/arm/armclang.rs
index e68c02d02..e44ee2f4a 100644
--- a/library/stdarch/crates/core_arch/src/arm/armclang.rs
+++ b/library/stdarch/crates/core_arch/src/arm/armclang.rs
@@ -30,6 +30,6 @@ use stdarch_test::assert_instr;
#[inline(always)]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __breakpoint<const VAL: i32>() {
- static_assert_imm8!(VAL);
+ static_assert_uimm_bits!(VAL, 8);
crate::arch::asm!("bkpt #{}", const VAL);
}
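
Same migration for the breakpoint immediate, which the assert bounds to 8 bits:

unsafe { __breakpoint::<42>() };   // emits `bkpt #42`
// unsafe { __breakpoint::<256>() } is rejected at compile time,
// exactly as the old static_assert_imm8!(VAL) rejected it: 256 needs 9 bits.
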
diff --git a/library/stdarch/crates/core_arch/src/arm/mod.rs b/library/stdarch/crates/core_arch/src/arm/mod.rs
index efe0068d4..ec91e5de5 100644
--- a/library/stdarch/crates/core_arch/src/arm/mod.rs
+++ b/library/stdarch/crates/core_arch/src/arm/mod.rs
@@ -103,7 +103,7 @@ pub unsafe fn udf() -> ! {
#[inline(always)]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __dbg<const IMM4: i32>() {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
dbg(IMM4);
}
diff --git a/library/stdarch/crates/core_arch/src/arm/neon.rs b/library/stdarch/crates/core_arch/src/arm/neon.rs
index a6291c95c..e1de48538 100644
--- a/library/stdarch/crates/core_arch/src/arm/neon.rs
+++ b/library/stdarch/crates/core_arch/src/arm/neon.rs
@@ -821,7 +821,7 @@ pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let n = N as i8;
vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n))
}
@@ -831,7 +831,7 @@ pub unsafe fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let n = N as i8;
vshiftins_v16i8(
a,
@@ -845,7 +845,7 @@ pub unsafe fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
let n = N as i16;
vshiftins_v4i16(a, b, int16x4_t(n, n, n, n))
}
@@ -855,7 +855,7 @@ pub unsafe fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
let n = N as i16;
vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n))
}
@@ -865,7 +865,7 @@ pub unsafe fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
vshiftins_v2i32(a, b, int32x2_t(N, N))
}
/// Shift Left and Insert (immediate)
@@ -874,7 +874,7 @@ pub unsafe fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
vshiftins_v4i32(a, b, int32x4_t(N, N, N, N))
}
/// Shift Left and Insert (immediate)
@@ -883,7 +883,7 @@ pub unsafe fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where 0 <= N && N <= 63);
+ static_assert!(0 <= N && N <= 63);
vshiftins_v1i64(a, b, int64x1_t(N as i64))
}
/// Shift Left and Insert (immediate)
@@ -892,7 +892,7 @@ pub unsafe fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert!(N : i32 where 0 <= N && N <= 63);
+ static_assert!(0 <= N && N <= 63);
vshiftins_v2i64(a, b, int64x2_t(N as i64, N as i64))
}
/// Shift Left and Insert (immediate)
@@ -901,7 +901,7 @@ pub unsafe fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let n = N as i8;
transmute(vshiftins_v8i8(
transmute(a),
@@ -915,7 +915,7 @@ pub unsafe fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let n = N as i8;
transmute(vshiftins_v16i8(
transmute(a),
@@ -929,7 +929,7 @@ pub unsafe fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
let n = N as i16;
transmute(vshiftins_v4i16(
transmute(a),
@@ -943,7 +943,7 @@ pub unsafe fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
let n = N as i16;
transmute(vshiftins_v8i16(
transmute(a),
@@ -957,7 +957,7 @@ pub unsafe fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
transmute(vshiftins_v2i32(transmute(a), transmute(b), int32x2_t(N, N)))
}
/// Shift Left and Insert (immediate)
@@ -966,7 +966,7 @@ pub unsafe fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[cfg_attr(test, assert_instr("vsli.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert!(N: i32 where N >= 0 && N <= 31);
+ static_assert!(N >= 0 && N <= 31);
transmute(vshiftins_v4i32(
transmute(a),
transmute(b),
@@ -979,7 +979,7 @@ pub unsafe fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where 0 <= N && N <= 63);
+ static_assert!(0 <= N && N <= 63);
transmute(vshiftins_v1i64(
transmute(a),
transmute(b),
@@ -992,7 +992,7 @@ pub unsafe fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where 0 <= N && N <= 63);
+ static_assert!(0 <= N && N <= 63);
transmute(vshiftins_v2i64(
transmute(a),
transmute(b),
@@ -1005,7 +1005,7 @@ pub unsafe fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let n = N as i8;
transmute(vshiftins_v8i8(
transmute(a),
@@ -1019,7 +1019,7 @@ pub unsafe fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr("vsli.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
let n = N as i8;
transmute(vshiftins_v16i8(
transmute(a),
@@ -1033,7 +1033,7 @@ pub unsafe fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
let n = N as i16;
transmute(vshiftins_v4i16(
transmute(a),
@@ -1048,7 +1048,7 @@ pub unsafe fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
#[cfg_attr(test, assert_instr("vsli.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
let n = N as i16;
transmute(vshiftins_v8i16(
transmute(a),
@@ -1065,7 +1065,7 @@ pub unsafe fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
- static_assert!(N : i32 where 0 <= N && N <= 63);
+ static_assert!(0 <= N && N <= 63);
transmute(vshiftins_v1i64(
transmute(a),
transmute(b),
@@ -1081,7 +1081,7 @@ pub unsafe fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
#[cfg_attr(test, assert_instr("vsli.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- static_assert!(N : i32 where 0 <= N && N <= 63);
+ static_assert!(0 <= N && N <= 63);
transmute(vshiftins_v2i64(
transmute(a),
transmute(b),
@@ -1094,7 +1094,7 @@ pub unsafe fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x
#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert!(N : i32 where 1 <= N && N <= 8);
+ static_assert!(1 <= N && N <= 8);
let n = -N as i8;
vshiftins_v8i8(a, b, int8x8_t(n, n, n, n, n, n, n, n))
}
@@ -1104,7 +1104,7 @@ pub unsafe fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert!(N : i32 where 1 <= N && N <= 8);
+ static_assert!(1 <= N && N <= 8);
let n = -N as i8;
vshiftins_v16i8(
a,
@@ -1118,7 +1118,7 @@ pub unsafe fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert!(N : i32 where 1 <= N && N <= 16);
+ static_assert!(1 <= N && N <= 16);
let n = -N as i16;
vshiftins_v4i16(a, b, int16x4_t(n, n, n, n))
}
@@ -1128,7 +1128,7 @@ pub unsafe fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert!(N : i32 where 1 <= N && N <= 16);
+ static_assert!(1 <= N && N <= 16);
let n = -N as i16;
vshiftins_v8i16(a, b, int16x8_t(n, n, n, n, n, n, n, n))
}
@@ -1138,7 +1138,7 @@ pub unsafe fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert!(N : i32 where 1 <= N && N <= 32);
+ static_assert!(1 <= N && N <= 32);
vshiftins_v2i32(a, b, int32x2_t(-N, -N))
}
/// Shift Right and Insert (immediate)
@@ -1147,7 +1147,7 @@ pub unsafe fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert!(N : i32 where 1 <= N && N <= 32);
+ static_assert!(1 <= N && N <= 32);
vshiftins_v4i32(a, b, int32x4_t(-N, -N, -N, -N))
}
/// Shift Right and Insert (immediate)
@@ -1156,7 +1156,7 @@ pub unsafe fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where 1 <= N && N <= 64);
+ static_assert!(1 <= N && N <= 64);
vshiftins_v1i64(a, b, int64x1_t(-N as i64))
}
/// Shift Right and Insert (immediate)
@@ -1165,7 +1165,7 @@ pub unsafe fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert!(N : i32 where 1 <= N && N <= 64);
+ static_assert!(1 <= N && N <= 64);
vshiftins_v2i64(a, b, int64x2_t(-N as i64, -N as i64))
}
/// Shift Right and Insert (immediate)
@@ -1174,7 +1174,7 @@ pub unsafe fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert!(N : i32 where 1 <= N && N <= 8);
+ static_assert!(1 <= N && N <= 8);
let n = -N as i8;
transmute(vshiftins_v8i8(
transmute(a),
@@ -1188,7 +1188,7 @@ pub unsafe fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert!(N : i32 where 1 <= N && N <= 8);
+ static_assert!(1 <= N && N <= 8);
let n = -N as i8;
transmute(vshiftins_v16i8(
transmute(a),
@@ -1202,7 +1202,7 @@ pub unsafe fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert!(N : i32 where 1 <= N && N <= 16);
+ static_assert!(1 <= N && N <= 16);
let n = -N as i16;
transmute(vshiftins_v4i16(
transmute(a),
@@ -1216,7 +1216,7 @@ pub unsafe fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert!(N : i32 where 1 <= N && N <= 16);
+ static_assert!(1 <= N && N <= 16);
let n = -N as i16;
transmute(vshiftins_v8i16(
transmute(a),
@@ -1230,7 +1230,7 @@ pub unsafe fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where 1 <= N && N <= 32);
+ static_assert!(1 <= N && N <= 32);
transmute(vshiftins_v2i32(
transmute(a),
transmute(b),
@@ -1243,7 +1243,7 @@ pub unsafe fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[cfg_attr(test, assert_instr("vsri.32", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where 1 <= N && N <= 32);
+ static_assert!(1 <= N && N <= 32);
transmute(vshiftins_v4i32(
transmute(a),
transmute(b),
@@ -1256,7 +1256,7 @@ pub unsafe fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where 1 <= N && N <= 64);
+ static_assert!(1 <= N && N <= 64);
transmute(vshiftins_v1i64(
transmute(a),
transmute(b),
@@ -1269,7 +1269,7 @@ pub unsafe fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where 1 <= N && N <= 64);
+ static_assert!(1 <= N && N <= 64);
transmute(vshiftins_v2i64(
transmute(a),
transmute(b),
@@ -1282,7 +1282,7 @@ pub unsafe fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- static_assert!(N : i32 where 1 <= N && N <= 8);
+ static_assert!(1 <= N && N <= 8);
let n = -N as i8;
transmute(vshiftins_v8i8(
transmute(a),
@@ -1296,7 +1296,7 @@ pub unsafe fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(test, assert_instr("vsri.8", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- static_assert!(N : i32 where 1 <= N && N <= 8);
+ static_assert!(1 <= N && N <= 8);
let n = -N as i8;
transmute(vshiftins_v16i8(
transmute(a),
@@ -1310,7 +1310,7 @@ pub unsafe fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16
#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- static_assert!(N : i32 where 1 <= N && N <= 16);
+ static_assert!(1 <= N && N <= 16);
let n = -N as i16;
transmute(vshiftins_v4i16(
transmute(a),
@@ -1324,7 +1324,7 @@ pub unsafe fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4
#[cfg_attr(test, assert_instr("vsri.16", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- static_assert!(N : i32 where 1 <= N && N <= 16);
+ static_assert!(1 <= N && N <= 16);
let n = -N as i16;
transmute(vshiftins_v8i16(
transmute(a),
@@ -1341,7 +1341,7 @@ pub unsafe fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
- static_assert!(N : i32 where 1 <= N && N <= 64);
+ static_assert!(1 <= N && N <= 64);
transmute(vshiftins_v1i64(
transmute(a),
transmute(b),
@@ -1357,7 +1357,7 @@ pub unsafe fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1
#[cfg_attr(test, assert_instr("vsri.64", N = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
- static_assert!(N : i32 where 1 <= N && N <= 64);
+ static_assert!(1 <= N && N <= 64);
transmute(vshiftins_v2i64(
transmute(a),
transmute(b),
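
On 32-bit ARM, the vsli/vsri intrinsics in this file all funnel into one LLVM shift-and-insert builtin per lane width, with left shifts encoded as positive counts and right shifts as negative ones (the `-N` in the vsri bodies above). A scalar model of the per-lane semantics, offered as a sketch rather than the stdarch code:

/// SLI: shift `b` left by `n` and insert into `a`; the low `n` bits of
/// `a` survive. n is 0..=7 for 8-bit lanes.
fn sli8(a: u8, b: u8, n: u32) -> u8 {
    assert!(n < 8); // mirrors static_assert_uimm_bits!(N, 3)
    let written = 0xFFu8 << n;
    ((b << n) & written) | (a & !written)
}

/// SRI: shift `b` right by `n` and insert into `a`; the high `n` bits of
/// `a` survive. n is 1..=8 for 8-bit lanes, and n == 8 leaves `a` intact.
fn sri8(a: u8, b: u8, n: u32) -> u8 {
    assert!((1..=8).contains(&n)); // mirrors 1 <= N && N <= 8
    let written = 0xFFu8.checked_shr(n).unwrap_or(0);
    (b.checked_shr(n).unwrap_or(0) & written) | (a & !written)
}
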
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
index fe473c51e..775811e65 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
@@ -2793,7 +2793,7 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
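
The hunk above drops a redundant "aes" from the arm-specific attribute (the `target_feature(enable = "neon,aes")` just before it already enables it). The remaining hunks in this file migrate the fixed-point converts: in vcvt_n_* the const N is the number of fractional bits, which is why the bound is 1 <= N <= 32. A scalar model of one lane, as a sketch:

// vcvt_n_f32_s32::<N> per lane: interpret the i32 as fixed-point with
// N fractional bits, i.e. scale by 2^-N.
fn cvt_n_f32_s32(a: i32, n: i32) -> f32 {
    assert!((1..=32).contains(&n)); // mirrors the static_assert above
    a as f32 * 2.0f32.powi(-n)
}

// The s32_f32 direction scales by 2^N instead and saturates to i32.
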
@@ -2875,7 +2875,7 @@ pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_f32_s32<const N: i32>(a: int32x2_t) -> float32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32")]
@@ -2894,7 +2894,7 @@ vcvt_n_f32_s32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f32_s32<const N: i32>(a: int32x2_t) -> float32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32")]
@@ -2912,7 +2912,7 @@ vcvt_n_f32_s32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_f32_s32<const N: i32>(a: int32x4_t) -> float32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32")]
@@ -2931,7 +2931,7 @@ vcvtq_n_f32_s32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f32_s32<const N: i32>(a: int32x4_t) -> float32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32")]
@@ -2949,7 +2949,7 @@ vcvtq_n_f32_s32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_f32_u32<const N: i32>(a: uint32x2_t) -> float32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32")]
@@ -2968,7 +2968,7 @@ vcvt_n_f32_u32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_f32_u32<const N: i32>(a: uint32x2_t) -> float32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32")]
@@ -2986,7 +2986,7 @@ vcvt_n_f32_u32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32")]
@@ -3005,7 +3005,7 @@ vcvtq_n_f32_u32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_f32_u32<const N: i32>(a: uint32x4_t) -> float32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32")]
@@ -3023,7 +3023,7 @@ vcvtq_n_f32_u32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_s32_f32<const N: i32>(a: float32x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32")]
@@ -3042,7 +3042,7 @@ vcvt_n_s32_f32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_s32_f32<const N: i32>(a: float32x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32")]
@@ -3060,7 +3060,7 @@ vcvt_n_s32_f32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32")]
@@ -3079,7 +3079,7 @@ vcvtq_n_s32_f32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_s32_f32<const N: i32>(a: float32x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32")]
@@ -3097,7 +3097,7 @@ vcvtq_n_s32_f32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvt_n_u32_f32<const N: i32>(a: float32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32")]
@@ -3116,7 +3116,7 @@ vcvt_n_u32_f32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvt_n_u32_f32<const N: i32>(a: float32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32")]
@@ -3134,7 +3134,7 @@ vcvt_n_u32_f32_(a, N)
#[cfg_attr(test, assert_instr(vcvt, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32")]
@@ -3153,7 +3153,7 @@ vcvtq_n_u32_f32_(a, N)
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcvtq_n_u32_f32<const N: i32>(a: float32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32")]
@@ -3249,8 +3249,8 @@ vcvtq_u32_f32_(a)
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
- static_assert_imm3!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
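Note: two mechanical renamings land here and repeat through the rest of the `vdup_lane` family. `static_assert_imm3!(N)` becomes `static_assert_uimm_bits!(N, 3)`, moving the bit width out of the macro name and into an argument, and the lane-counted `simd_shuffle8!` becomes the width-generic `simd_shuffle!`, which infers the lane count from the index array and so no longer needs the `<const N: i32>` capture list. As a plain function, the bound the new assertion enforces looks roughly like this (sketch; `fits_in_unsigned_bits` is a hypothetical name):

    const fn fits_in_unsigned_bits(n: i32, bits: u32) -> bool {
        // e.g. bits = 3 accepts exactly the lane indices 0..=7 of an 8-lane vector
        n >= 0 && (n as u64) < (1u64 << bits)
    }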
@@ -3264,8 +3264,8 @@ pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
- static_assert_imm4!(N);
- simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 4);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3279,8 +3279,8 @@ pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
- static_assert_imm2!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3294,8 +3294,8 @@ pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
- static_assert_imm3!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3309,8 +3309,8 @@ pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3324,8 +3324,8 @@ pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
- static_assert_imm2!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3339,8 +3339,8 @@ pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
- static_assert_imm4!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 4);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3354,8 +3354,8 @@ pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
- static_assert_imm3!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3369,8 +3369,8 @@ pub unsafe fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
- static_assert_imm2!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3384,8 +3384,8 @@ pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
- static_assert_imm3!(N);
- simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3399,8 +3399,8 @@ pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
- static_assert_imm2!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3414,8 +3414,8 @@ pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
- static_assert_imm1!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3429,8 +3429,8 @@ pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3444,8 +3444,8 @@ pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
- static_assert_imm4!(N);
- simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 4);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3459,8 +3459,8 @@ pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3474,8 +3474,8 @@ pub unsafe fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3489,8 +3489,8 @@ pub unsafe fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3504,8 +3504,8 @@ pub unsafe fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3519,8 +3519,8 @@ pub unsafe fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
- static_assert_imm4!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 4);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3534,8 +3534,8 @@ pub unsafe fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
- static_assert_imm3!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3549,8 +3549,8 @@ pub unsafe fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
- static_assert_imm2!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3564,8 +3564,8 @@ pub unsafe fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
- static_assert_imm3!(N);
- simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3579,8 +3579,8 @@ pub unsafe fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
- static_assert_imm2!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3594,8 +3594,8 @@ pub unsafe fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
- static_assert_imm1!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3609,8 +3609,8 @@ pub unsafe fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3624,8 +3624,8 @@ pub unsafe fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
- static_assert_imm4!(N);
- simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 4);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3639,8 +3639,8 @@ pub unsafe fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
- static_assert_imm2!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3654,8 +3654,8 @@ pub unsafe fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
- static_assert_imm3!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3669,8 +3669,8 @@ pub unsafe fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
- static_assert_imm4!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 4);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3684,8 +3684,8 @@ pub unsafe fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
- static_assert_imm3!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3699,8 +3699,8 @@ pub unsafe fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
- static_assert_imm3!(N);
- simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 3);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3714,8 +3714,8 @@ pub unsafe fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
- static_assert_imm2!(N);
- simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3729,8 +3729,8 @@ pub unsafe fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3744,8 +3744,8 @@ pub unsafe fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t {
- static_assert!(N : i32 where N == 0);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert!(N == 0);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
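Note: `int64x1_t` and `uint64x1_t` have a single lane, so the only valid index is 0 and the where-clause form collapses to a plain `static_assert!(N == 0)`. For example, `vdupq_lane_s64::<0>(d)` compiles, while `vdupq_lane_s64::<1>(d)` is rejected at compile time.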
@@ -3759,8 +3759,8 @@ pub unsafe fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3774,8 +3774,8 @@ pub unsafe fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t {
- static_assert!(N : i32 where N == 0);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert!(N == 0);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3789,8 +3789,8 @@ pub unsafe fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_f32<const N: i32>(a: float32x2_t) -> float32x2_t {
- static_assert_imm1!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3804,8 +3804,8 @@ pub unsafe fn vdup_lane_f32<const N: i32>(a: float32x2_t) -> float32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
- static_assert_imm2!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3819,8 +3819,8 @@ pub unsafe fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
- static_assert_imm2!(N);
- simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
+ static_assert_uimm_bits!(N, 2);
+ simd_shuffle!(a, a, [N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3834,8 +3834,8 @@ pub unsafe fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
- static_assert_imm1!(N);
- simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
+ static_assert_uimm_bits!(N, 1);
+ simd_shuffle!(a, a, [N as u32, N as u32, N as u32, N as u32])
}
/// Set all vector lanes to the same value
@@ -3849,7 +3849,7 @@ pub unsafe fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -3864,7 +3864,7 @@ pub unsafe fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -3879,7 +3879,7 @@ pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
transmute::<i64, _>(simd_extract(a, N as u32))
}
@@ -3894,7 +3894,7 @@ pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
transmute::<u64, _>(simd_extract(a, N as u32))
}
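Note: when the result itself has a single lane there is nothing to shuffle, so `vdup_laneq_s64`/`vdup_laneq_u64` extract the chosen lane with `simd_extract` and `transmute` the scalar back into a one-lane vector; only the assertion macro changes in these hunks. A scalar model (hypothetical `dup_laneq_model`, not part of the crate):

    fn dup_laneq_model<const N: usize>(a: [i64; 2]) -> [i64; 1] {
        [a[N]] // N is restricted to 0 or 1 by static_assert_uimm_bits!(N, 1)
    }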
@@ -3909,16 +3909,16 @@ pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
match N & 0b111 {
- 0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- 7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
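Note: `vext` extracts a contiguous window from the concatenation of its two inputs, which is why each arm of the match shifts the index list by one; the `N & 0b111` mask is redundant after the assertion but keeps the `unreachable_unchecked()` arm obviously dead. A scalar model of the 8-lane case (hypothetical `vext_model`, not part of the crate):

    fn vext_model<const N: usize>(a: [i8; 8], b: [i8; 8]) -> [i8; 8] {
        let mut out = [0i8; 8];
        for i in 0..8 {
            let idx = N + i; // N is 0..=7, so idx stays inside concat(a, b)
            out[i] = if idx < 8 { a[idx] } else { b[idx - 8] };
        }
        out
    }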
@@ -3934,24 +3934,24 @@ pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
match N & 0b1111 {
- 0 => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
- 2 => simd_shuffle16!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
- 3 => simd_shuffle16!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
- 4 => simd_shuffle16!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
- 5 => simd_shuffle16!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
- 6 => simd_shuffle16!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
- 7 => simd_shuffle16!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
- 8 => simd_shuffle16!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
- 9 => simd_shuffle16!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
- 10 => simd_shuffle16!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
- 11 => simd_shuffle16!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
- 12 => simd_shuffle16!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
- 13 => simd_shuffle16!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
- 14 => simd_shuffle16!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
- 15 => simd_shuffle16!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
+ 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
+ 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
+ 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
+ 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
+ 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
+ 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
+ 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
+ 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
_ => unreachable_unchecked(),
}
}
@@ -3967,12 +3967,12 @@ pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
match N & 0b11 {
- 0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- 3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
@@ -3988,16 +3988,16 @@ pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
match N & 0b111 {
- 0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- 7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
@@ -4013,10 +4013,10 @@ pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
@@ -4032,12 +4032,12 @@ pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
match N & 0b11 {
- 0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- 3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
@@ -4053,16 +4053,16 @@ pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
match N & 0b111 {
- 0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- 7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
@@ -4078,24 +4078,24 @@ pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
match N & 0b1111 {
- 0 => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
- 2 => simd_shuffle16!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
- 3 => simd_shuffle16!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
- 4 => simd_shuffle16!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
- 5 => simd_shuffle16!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
- 6 => simd_shuffle16!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
- 7 => simd_shuffle16!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
- 8 => simd_shuffle16!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
- 9 => simd_shuffle16!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
- 10 => simd_shuffle16!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
- 11 => simd_shuffle16!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
- 12 => simd_shuffle16!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
- 13 => simd_shuffle16!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
- 14 => simd_shuffle16!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
- 15 => simd_shuffle16!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
+ 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
+ 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
+ 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
+ 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
+ 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
+ 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
+ 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
+ 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
_ => unreachable_unchecked(),
}
}
@@ -4111,12 +4111,12 @@ pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
match N & 0b11 {
- 0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- 3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
@@ -4132,16 +4132,16 @@ pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
match N & 0b111 {
- 0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- 7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
@@ -4157,10 +4157,10 @@ pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
@@ -4176,12 +4176,12 @@ pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
match N & 0b11 {
- 0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- 3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
@@ -4197,16 +4197,16 @@ pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
match N & 0b111 {
- 0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- 7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
@@ -4222,24 +4222,24 @@ pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
match N & 0b1111 {
- 0 => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle16!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
- 2 => simd_shuffle16!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
- 3 => simd_shuffle16!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
- 4 => simd_shuffle16!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
- 5 => simd_shuffle16!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
- 6 => simd_shuffle16!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
- 7 => simd_shuffle16!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
- 8 => simd_shuffle16!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
- 9 => simd_shuffle16!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
- 10 => simd_shuffle16!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
- 11 => simd_shuffle16!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
- 12 => simd_shuffle16!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
- 13 => simd_shuffle16!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
- 14 => simd_shuffle16!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
- 15 => simd_shuffle16!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
+ 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
+ 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]),
+ 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]),
+ 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]),
+ 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]),
+ 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]),
+ 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
+ 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]),
_ => unreachable_unchecked(),
}
}
@@ -4255,12 +4255,12 @@ pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
match N & 0b11 {
- 0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- 3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
@@ -4276,16 +4276,16 @@ pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
match N & 0b111 {
- 0 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 1 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 2 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 3 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 4 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 5 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 6 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- 7 => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
_ => unreachable_unchecked(),
}
}
@@ -4301,10 +4301,10 @@ pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
@@ -4320,10 +4320,10 @@ pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
@@ -4339,10 +4339,10 @@ pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
match N & 0b1 {
- 0 => simd_shuffle2!(a, b, [0, 1]),
- 1 => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [0, 1]),
+ 1 => simd_shuffle!(a, b, [1, 2]),
_ => unreachable_unchecked(),
}
}
@@ -4358,12 +4358,12 @@ pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
match N & 0b11 {
- 0 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 2 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- 3 => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 2 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ 3 => simd_shuffle!(a, b, [3, 4, 5, 6]),
_ => unreachable_unchecked(),
}
}
@@ -4691,8 +4691,8 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
- vmla_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmla_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
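Note: in the `vmla_lane` family the `simd_shuffle!` first splats `c[LANE]` across every lane, then the plain `vmla_s16` performs the element-wise multiply-accumulate; the `_laneq` variants only widen `c` (and with it the assertion's bit width). A scalar model (hypothetical `vmla_lane_model`, not part of the crate):

    fn vmla_lane_model<const LANE: usize>(a: [i16; 4], b: [i16; 4], c: [i16; 4]) -> [i16; 4] {
        let mut out = [0i16; 4];
        for i in 0..4 {
            // out[i] = a[i] + b[i] * c[LANE], wrapping to model vector arithmetic
            out[i] = a[i].wrapping_add(b[i].wrapping_mul(c[LANE]));
        }
        out
    }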
@@ -4706,8 +4706,8 @@ pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int1
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
- vmla_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmla_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4721,8 +4721,8 @@ pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
- vmlaq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlaq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4736,8 +4736,8 @@ pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
- vmlaq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlaq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4751,8 +4751,8 @@ pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- vmla_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4766,8 +4766,8 @@ pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int3
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- vmla_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmla_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4781,8 +4781,8 @@ pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- vmlaq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlaq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4796,8 +4796,8 @@ pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmlaq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlaq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4811,8 +4811,8 @@ pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(LANE);
- vmla_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmla_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4826,8 +4826,8 @@ pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: ui
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t {
- static_assert_imm3!(LANE);
- vmla_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmla_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4841,8 +4841,8 @@ pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t {
- static_assert_imm2!(LANE);
- vmlaq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlaq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4856,8 +4856,8 @@ pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(LANE);
- vmlaq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlaq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4871,8 +4871,8 @@ pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(LANE);
- vmla_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4886,8 +4886,8 @@ pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: ui
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t {
- static_assert_imm2!(LANE);
- vmla_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmla_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4901,8 +4901,8 @@ pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t {
- static_assert_imm1!(LANE);
- vmlaq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlaq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4916,8 +4916,8 @@ pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmlaq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlaq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4931,8 +4931,8 @@ pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- vmla_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4946,8 +4946,8 @@ pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm2!(LANE);
- vmla_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmla_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4961,8 +4961,8 @@ pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- vmlaq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlaq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply accumulate with scalar
@@ -4976,8 +4976,8 @@ pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
- vmlaq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlaq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Signed multiply-add long
@@ -5121,8 +5121,8 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmlal_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
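// The widening (vmlal_*) form multiplies 16-bit lanes into 32 bits before
// accumulating, which is why b and c are int16x4_t while a and the result are
// int32x4_t. A semantic sketch under the same broadcast reading:
fn vmlal_lane_model<const LANE: usize>(a: [i32; 4], b: [i16; 4], c: [i16; 4]) -> [i32; 4] {
    core::array::from_fn(|i| a[i].wrapping_add(b[i] as i32 * c[LANE] as i32))
}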
/// Vector widening multiply accumulate with scalar
@@ -5136,8 +5136,8 @@ pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(LANE);
- vmlal_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlal_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
@@ -5151,8 +5151,8 @@ pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
- vmlal_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
@@ -5166,8 +5166,8 @@ pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(LANE);
- vmlal_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
@@ -5181,8 +5181,8 @@ pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmlal_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
@@ -5196,8 +5196,8 @@ pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t {
- static_assert_imm3!(LANE);
- vmlal_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlal_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
@@ -5211,8 +5211,8 @@ pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
- vmlal_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector widening multiply accumulate with scalar
@@ -5226,8 +5226,8 @@ pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t {
- static_assert_imm2!(LANE);
- vmlal_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlal_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Multiply-subtract from accumulator
@@ -5553,8 +5553,8 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
- vmls_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmls_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5568,8 +5568,8 @@ pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int1
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
- vmls_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmls_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5583,8 +5583,8 @@ pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
- vmlsq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5598,8 +5598,8 @@ pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
- vmlsq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlsq_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5613,8 +5613,8 @@ pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- vmls_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5628,8 +5628,8 @@ pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int3
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- vmls_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmls_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5643,8 +5643,8 @@ pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- vmlsq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5658,8 +5658,8 @@ pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmlsq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsq_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5673,8 +5673,8 @@ pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(LANE);
- vmls_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmls_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5688,8 +5688,8 @@ pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: ui
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t {
- static_assert_imm3!(LANE);
- vmls_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmls_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5703,8 +5703,8 @@ pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t {
- static_assert_imm2!(LANE);
- vmlsq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5718,8 +5718,8 @@ pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(LANE);
- vmlsq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlsq_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5733,8 +5733,8 @@ pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(LANE);
- vmls_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5748,8 +5748,8 @@ pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: ui
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t {
- static_assert_imm2!(LANE);
- vmls_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmls_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5763,8 +5763,8 @@ pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t {
- static_assert_imm1!(LANE);
- vmlsq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5778,8 +5778,8 @@ pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmlsq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsq_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5793,8 +5793,8 @@ pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- vmls_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5808,8 +5808,8 @@ pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t {
- static_assert_imm2!(LANE);
- vmls_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmls_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5823,8 +5823,8 @@ pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- vmlsq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector multiply subtract with scalar
@@ -5838,8 +5838,8 @@ pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
- vmlsq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsq_f32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Signed multiply-subtract long
@@ -5983,8 +5983,8 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t {
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmlsl_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -5998,8 +5998,8 @@ pub unsafe fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
- static_assert_imm3!(LANE);
- vmlsl_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlsl_s16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -6013,8 +6013,8 @@ pub unsafe fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
- vmlsl_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -6028,8 +6028,8 @@ pub unsafe fn vmlsl_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
- static_assert_imm2!(LANE);
- vmlsl_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_s32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -6043,8 +6043,8 @@ pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: in
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmlsl_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -6058,8 +6058,8 @@ pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t {
- static_assert_imm3!(LANE);
- vmlsl_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmlsl_u16(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -6073,8 +6073,8 @@ pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c:
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
- vmlsl_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
/// Vector widening multiply subtract with scalar
@@ -6088,8 +6088,8 @@ pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: u
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t {
- static_assert_imm2!(LANE);
- vmlsl_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmlsl_u32(a, b, simd_shuffle!(c, c, [LANE as u32, LANE as u32]))
}
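// The vmls_*/vmlsl_* families above mirror vmla_*/vmlal_* exactly, with the
// broadcast product subtracted from the accumulator instead of added; only
// the subtraction distinguishes the sketch:
fn vmls_lane_model<const LANE: usize>(a: [i16; 4], b: [i16; 4], c: [i16; 4]) -> [i16; 4] {
    core::array::from_fn(|i| a[i].wrapping_sub(b[i].wrapping_mul(c[LANE])))
}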
/// Negate
@@ -8341,7 +8341,7 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
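// On 32-bit ARM these polynomial loads stop re-enabling `aes` in the
// arch-specific attribute, presumably because the shared
// #[target_feature(enable = "neon,aes")] directly above already requests it;
// only the `v8` architecture gate remains ARM-only. The attributes then
// compose like this (illustrative; these arm features are nightly-only, as
// in stdarch itself):
//
//     #[target_feature(enable = "neon,aes")]                           // both arches
//     #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]  // arm-only
//     pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { /* ... */ }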
@@ -8354,7 +8354,7 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -8367,7 +8367,7 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -8380,7 +8380,7 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -8393,7 +8393,7 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -8406,7 +8406,7 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -8907,7 +8907,7 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -9360,7 +9360,7 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -9443,7 +9443,7 @@ vld2q_dup_f32_(a as _)
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i8.p0i8")]
@@ -9462,7 +9462,7 @@ vld2_lane_s8_(a as _, b.0, b.1, LANE, 1)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x2_t) -> int8x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v8i8.p0i8")]
@@ -9480,7 +9480,7 @@ vld2_lane_s8_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i16.p0i8")]
@@ -9499,7 +9499,7 @@ vld2_lane_s16_(a as _, b.0, b.1, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x2_t) -> int16x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4i16.p0i8")]
@@ -9517,7 +9517,7 @@ vld2_lane_s16_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2i32.p0i8")]
@@ -9536,7 +9536,7 @@ vld2_lane_s32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x2_t) -> int32x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2i32.p0i8")]
@@ -9554,7 +9554,7 @@ vld2_lane_s32_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v8i16.p0i8")]
@@ -9573,7 +9573,7 @@ vld2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x2_t) -> int16x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v8i16.p0i8")]
@@ -9591,7 +9591,7 @@ vld2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4i32.p0i8")]
@@ -9610,7 +9610,7 @@ vld2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x2_t) -> int32x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4i32.p0i8")]
@@ -9630,7 +9630,7 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
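// Two details worth noting in the vld2/vld3/vld4 lane loaders (vld3 and vld4
// repeat the same shape with one more vector each). First, each loader binds
// a per-arch LLVM intrinsic with a different ABI: the ARM binding takes
// (ptr, v0, v1, LANE, alignment) while the AArch64 one takes
// (v0, v1, LANE as i64, ptr), as the two vld2_lane_s8 bodies above show.
// Second, the unsigned and polynomial wrappers simply transmute through the
// signed workers, since the load itself is signedness-agnostic; the lane
// assert is repeated in the wrapper, presumably so an out-of-range LANE is
// reported against the intrinsic the caller actually used. A hypothetical,
// much simplified sketch of that delegation (load_signed stands in for
// vld2_lane_s8 and loads only one lane of one vector):
unsafe fn load_unsigned<const LANE: i32>(a: *const u8, b: [u8; 8]) -> [u8; 8] {
    unsafe fn load_signed<const LANE: i32>(a: *const i8, mut b: [i8; 8]) -> [i8; 8] {
        b[LANE as usize] = *a; // placeholder body, not the real interleaved load
        b
    }
    core::mem::transmute(load_signed::<LANE>(a.cast(), core::mem::transmute(b)))
}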
@@ -9645,7 +9645,7 @@ pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uin
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -9660,7 +9660,7 @@ pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld2_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -9675,7 +9675,7 @@ pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -9690,7 +9690,7 @@ pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld2q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -9705,7 +9705,7 @@ pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -9720,7 +9720,7 @@ pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> pol
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -9735,7 +9735,7 @@ pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -9748,7 +9748,7 @@ pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) ->
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v2f32.p0i8")]
@@ -9767,7 +9767,7 @@ vld2_lane_f32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x2_t) -> float32x2x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v2f32.p0i8")]
@@ -9785,7 +9785,7 @@ vld2_lane_f32_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld2lane.v4f32.p0i8")]
@@ -9804,7 +9804,7 @@ vld2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x2_t) -> float32x4x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld2lane.v4f32.p0i8")]
@@ -10192,7 +10192,7 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -10645,7 +10645,7 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -10728,7 +10728,7 @@ vld3q_dup_f32_(a as _)
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i8.p0i8")]
@@ -10747,7 +10747,7 @@ vld3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x3_t) -> int8x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v8i8.p0i8")]
@@ -10765,7 +10765,7 @@ vld3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i16.p0i8")]
@@ -10784,7 +10784,7 @@ vld3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x3_t) -> int16x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4i16.p0i8")]
@@ -10802,7 +10802,7 @@ vld3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2i32.p0i8")]
@@ -10821,7 +10821,7 @@ vld3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x3_t) -> int32x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2i32.p0i8")]
@@ -10839,7 +10839,7 @@ vld3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v8i16.p0i8")]
@@ -10858,7 +10858,7 @@ vld3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x3_t) -> int16x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v8i16.p0i8")]
@@ -10876,7 +10876,7 @@ vld3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4i32.p0i8")]
@@ -10895,7 +10895,7 @@ vld3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x3_t) -> int32x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4i32.p0i8")]
@@ -10915,7 +10915,7 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -10930,7 +10930,7 @@ pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uin
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -10945,7 +10945,7 @@ pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld3_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -10960,7 +10960,7 @@ pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -10975,7 +10975,7 @@ pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld3q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -10990,7 +10990,7 @@ pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -11005,7 +11005,7 @@ pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> pol
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -11020,7 +11020,7 @@ pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -11033,7 +11033,7 @@ pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) ->
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x3_t) -> float32x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v2f32.p0i8")]
@@ -11052,7 +11052,7 @@ vld3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x3_t) -> float32x2x3_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v2f32.p0i8")]
@@ -11070,7 +11070,7 @@ vld3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld3lane.v4f32.p0i8")]
@@ -11089,7 +11089,7 @@ vld3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x3_t) -> float32x4x3_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld3lane.v4f32.p0i8")]
@@ -11477,7 +11477,7 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -11930,7 +11930,7 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -12013,7 +12013,7 @@ vld4q_dup_f32_(a as _)
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i8.p0i8")]
@@ -12032,7 +12032,7 @@ vld4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s8<const LANE: i32>(a: *const i8, b: int8x8x4_t) -> int8x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v8i8.p0i8")]
@@ -12050,7 +12050,7 @@ vld4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i16.p0i8")]
@@ -12069,7 +12069,7 @@ vld4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s16<const LANE: i32>(a: *const i16, b: int16x4x4_t) -> int16x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4i16.p0i8")]
@@ -12087,7 +12087,7 @@ vld4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2i32.p0i8")]
@@ -12106,7 +12106,7 @@ vld4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s32<const LANE: i32>(a: *const i32, b: int32x2x4_t) -> int32x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2i32.p0i8")]
@@ -12124,7 +12124,7 @@ vld4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v8i16.p0i8")]
@@ -12143,7 +12143,7 @@ vld4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s16<const LANE: i32>(a: *const i16, b: int16x8x4_t) -> int16x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v8i16.p0i8")]
@@ -12161,7 +12161,7 @@ vld4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4i32.p0i8")]
@@ -12180,7 +12180,7 @@ vld4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s32<const LANE: i32>(a: *const i32, b: int32x4x4_t) -> int32x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4i32.p0i8")]
@@ -12200,7 +12200,7 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
}
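// The unsigned and polynomial lane loads contain no load logic of their
// own: after the lane check they reinterpret the pointer and vectors as the
// signed types, call the signed implementation, and reinterpret the result
// back. The transmutes are sound only because the paired types have
// identical size and layout; that assumption can be spot-checked at compile
// time (sketch, assuming the NEON types are in scope via core::arch):
const _: () = assert!(
    core::mem::size_of::<uint8x8x4_t>() == core::mem::size_of::<int8x8x4_t>()
);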
@@ -12215,7 +12215,7 @@ pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uin
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -12230,7 +12230,7 @@ pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vld4_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -12245,7 +12245,7 @@ pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -12260,7 +12260,7 @@ pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld4q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -12275,7 +12275,7 @@ pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -12290,7 +12290,7 @@ pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> pol
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -12305,7 +12305,7 @@ pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -12318,7 +12318,7 @@ pub unsafe fn vld4q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x4_t) ->
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v2f32.p0i8")]
@@ -12337,7 +12337,7 @@ vld4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f32<const LANE: i32>(a: *const f32, b: float32x2x4_t) -> float32x2x4_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v2f32.p0i8")]
@@ -12355,7 +12355,7 @@ vld4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vld4lane.v4f32.p0i8")]
@@ -12374,7 +12374,7 @@ vld4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f32<const LANE: i32>(a: *const f32, b: float32x4x4_t) -> float32x4x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.ld4lane.v4f32.p0i8")]
@@ -12394,7 +12394,7 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
*a = simd_extract(b, LANE as u32);
}
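// The single-lane stores need no LLVM binding at all: once LANE is checked,
// the store is simply "extract the lane, write it through the pointer".
// #[rustc_legacy_const_generics(2)] additionally lets callers pass the lane
// as a trailing value argument instead of a const generic. A usage sketch
// with arbitrarily chosen values:
#[target_feature(enable = "neon")]
unsafe fn store_lane_example() {
    let v = vdup_n_s8(7); // all eight lanes hold 7
    let mut out: i8 = 0;
    vst1_lane_s8::<3>(&mut out, v); // writes lane 3 of v to out
    assert_eq!(out, 7);
}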
@@ -12409,7 +12409,7 @@ pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
*a = simd_extract(b, LANE as u32);
}
@@ -12424,7 +12424,7 @@ pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -12439,7 +12439,7 @@ pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
*a = simd_extract(b, LANE as u32);
}
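// For the one-lane 64-bit vectors (int64x1_t and friends) the bound
// degenerates to LANE == 0, and the assertion sheds its old
// "LANE : i32 where LANE == 0" type-plus-where-clause syntax in favor of
// taking the boolean expression directly. One way the simpler form can be
// written; stdarch's real definition may differ:
macro_rules! static_assert {
    ($cond:expr) => {
        // Checked per monomorphization, so const generics are usable here.
        const { assert!($cond) }
    };
}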
@@ -12454,7 +12454,7 @@ pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
*a = simd_extract(b, LANE as u32);
}
@@ -12469,7 +12469,7 @@ pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
*a = simd_extract(b, LANE as u32);
}
@@ -12484,7 +12484,7 @@ pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
*a = simd_extract(b, LANE as u32);
}
@@ -12499,7 +12499,7 @@ pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -12514,7 +12514,7 @@ pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
*a = simd_extract(b, LANE as u32);
}
@@ -12529,7 +12529,7 @@ pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
*a = simd_extract(b, LANE as u32);
}
@@ -12544,7 +12544,7 @@ pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -12559,7 +12559,7 @@ pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
*a = simd_extract(b, LANE as u32);
}
@@ -12574,7 +12574,7 @@ pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
*a = simd_extract(b, LANE as u32);
}
@@ -12589,7 +12589,7 @@ pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
*a = simd_extract(b, LANE as u32);
}
@@ -12604,7 +12604,7 @@ pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
*a = simd_extract(b, LANE as u32);
}
@@ -12619,7 +12619,7 @@ pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -12634,7 +12634,7 @@ pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
*a = simd_extract(b, LANE as u32);
}
@@ -12649,7 +12649,7 @@ pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
*a = simd_extract(b, LANE as u32);
}
@@ -12664,7 +12664,7 @@ pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
*a = simd_extract(b, LANE as u32);
}
@@ -12679,7 +12679,7 @@ pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
*a = simd_extract(b, LANE as u32);
}
@@ -12688,13 +12688,13 @@ pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
*a = simd_extract(b, LANE as u32);
}
@@ -12703,13 +12703,13 @@ pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -12724,7 +12724,7 @@ pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
*a = simd_extract(b, LANE as u32);
}
@@ -12739,7 +12739,7 @@ pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
*a = simd_extract(b, LANE as u32);
}
@@ -14008,7 +14008,7 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14021,7 +14021,7 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14034,7 +14034,7 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14047,7 +14047,7 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14060,7 +14060,7 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14073,7 +14073,7 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14658,7 +14658,7 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -14741,7 +14741,7 @@ vst2q_f32_(b.0, b.1, a as _)
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i8")]
@@ -14760,7 +14760,7 @@ vst2_lane_s8_(a as _, b.0, b.1, LANE, 1)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v8i8.p0i8")]
@@ -14778,7 +14778,7 @@ vst2_lane_s8_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i16")]
@@ -14797,7 +14797,7 @@ vst2_lane_s16_(a as _, b.0, b.1, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4i16.p0i8")]
@@ -14815,7 +14815,7 @@ vst2_lane_s16_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2i32")]
@@ -14834,7 +14834,7 @@ vst2_lane_s32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2i32.p0i8")]
@@ -14852,7 +14852,7 @@ vst2_lane_s32_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v8i16")]
@@ -14871,7 +14871,7 @@ vst2q_lane_s16_(a as _, b.0, b.1, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v8i16.p0i8")]
@@ -14889,7 +14889,7 @@ vst2q_lane_s16_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4i32")]
@@ -14908,7 +14908,7 @@ vst2q_lane_s32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4i32.p0i8")]
@@ -14928,7 +14928,7 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -14943,7 +14943,7 @@ pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -14958,7 +14958,7 @@ pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst2_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -14973,7 +14973,7 @@ pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -14988,7 +14988,7 @@ pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst2q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -15003,7 +15003,7 @@ pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -15018,7 +15018,7 @@ pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -15033,7 +15033,7 @@ pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -15046,7 +15046,7 @@ pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) {
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v2f32")]
@@ -15065,7 +15065,7 @@ vst2_lane_f32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x2_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v2f32.p0i8")]
@@ -15083,7 +15083,7 @@ vst2_lane_f32_(b.0, b.1, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst2q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0i8.v4f32")]
@@ -15102,7 +15102,7 @@ vst2q_lane_f32_(a as _, b.0, b.1, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x2_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st2lane.v4f32.p0i8")]
@@ -15490,7 +15490,7 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -15573,7 +15573,7 @@ vst3q_f32_(b.0, b.1, b.2, a as _)
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i8")]
@@ -15592,7 +15592,7 @@ vst3_lane_s8_(a as _, b.0, b.1, b.2, LANE, 1)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v8i8.p0i8")]
@@ -15610,7 +15610,7 @@ vst3_lane_s8_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i16")]
@@ -15629,7 +15629,7 @@ vst3_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4i16.p0i8")]
@@ -15647,7 +15647,7 @@ vst3_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2i32")]
@@ -15666,7 +15666,7 @@ vst3_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2i32.p0i8")]
@@ -15684,7 +15684,7 @@ vst3_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v8i16")]
@@ -15703,7 +15703,7 @@ vst3q_lane_s16_(a as _, b.0, b.1, b.2, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v8i16.p0i8")]
@@ -15721,7 +15721,7 @@ vst3q_lane_s16_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4i32")]
@@ -15740,7 +15740,7 @@ vst3q_lane_s32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4i32.p0i8")]
@@ -15760,7 +15760,7 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst3_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -15775,7 +15775,7 @@ pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst3_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -15790,7 +15790,7 @@ pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst3_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -15805,7 +15805,7 @@ pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst3q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -15820,7 +15820,7 @@ pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst3q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -15835,7 +15835,7 @@ pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst3_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -15850,7 +15850,7 @@ pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst3_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -15865,7 +15865,7 @@ pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst3q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -15878,7 +15878,7 @@ pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) {
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v2f32")]
@@ -15897,7 +15897,7 @@ vst3_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x3_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v2f32.p0i8")]
@@ -15915,7 +15915,7 @@ vst3_lane_f32_(b.0, b.1, b.2, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst3lane.p0i8.v4f32")]
@@ -15934,7 +15934,7 @@ vst3q_lane_f32_(a as _, b.0, b.1, b.2, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x3_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st3lane.v4f32.p0i8")]
@@ -16322,7 +16322,7 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -16405,7 +16405,7 @@ vst4q_f32_(b.0, b.1, b.2, b.3, a as _)
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i8")]
@@ -16424,7 +16424,7 @@ vst4_lane_s8_(a as _, b.0, b.1, b.2, b.3, LANE, 1)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v8i8.p0i8")]
@@ -16442,7 +16442,7 @@ vst4_lane_s8_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i16")]
@@ -16461,7 +16461,7 @@ vst4_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4i16.p0i8")]
@@ -16479,7 +16479,7 @@ vst4_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2i32")]
@@ -16498,7 +16498,7 @@ vst4_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2i32.p0i8")]
@@ -16516,7 +16516,7 @@ vst4_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v8i16")]
@@ -16535,7 +16535,7 @@ vst4q_lane_s16_(a as _, b.0, b.1, b.2, b.3, LANE, 2)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v8i16.p0i8")]
@@ -16553,7 +16553,7 @@ vst4q_lane_s16_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4i32")]
@@ -16572,7 +16572,7 @@ vst4q_lane_s32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4i32.p0i8")]
@@ -16592,7 +16592,7 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst4_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -16607,7 +16607,7 @@ pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst4_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -16622,7 +16622,7 @@ pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
transmute(vst4_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -16637,7 +16637,7 @@ pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst4q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -16652,7 +16652,7 @@ pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst4q_lane_s32::<LANE>(transmute(a), transmute(b)))
}
@@ -16667,7 +16667,7 @@ pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst4_lane_s8::<LANE>(transmute(a), transmute(b)))
}
@@ -16682,7 +16682,7 @@ pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
transmute(vst4_lane_s16::<LANE>(transmute(a), transmute(b)))
}
@@ -16697,7 +16697,7 @@ pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
transmute(vst4q_lane_s16::<LANE>(transmute(a), transmute(b)))
}
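
This run of hunks covers the unsigned and polynomial wrappers, which are thin: each transmutes its pointer and vector arguments to the signed layout and delegates to the matching `vst4*_lane_s*` implementation, so only the assertion macro changes.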
@@ -16710,7 +16710,7 @@ pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) {
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v2f32")]
@@ -16729,7 +16729,7 @@ vst4_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2x4_t) {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v2f32.p0i8")]
@@ -16747,7 +16747,7 @@ vst4_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(test, assert_instr(vst4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst4lane.p0i8.v4f32")]
@@ -16766,7 +16766,7 @@ vst4q_lane_f32_(a as _, b.0, b.1, b.2, b.3, LANE, 4)
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4x4_t) {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.st4lane.v4f32.p0i8")]
@@ -17136,8 +17136,8 @@ pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
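
From here on the lane-multiply intrinsics drop two pieces of legacy macro syntax at once: the lane-count suffix (`simd_shuffle2!`/`simd_shuffle4!`/`simd_shuffle8!` collapse into one `simd_shuffle!` whose result width is inferred from the index array) and the `<const LANE: i32>` capture list, which the newer macro no longer needs because the const parameter is already in scope. The broadcast idiom, before and after:

// old: suffixed macro plus an explicit const capture list
//   simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])
// new: one macro; a 4-entry index array implies a 4-lane result
//   simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32])

With both inputs equal to `b`, the repeated index simply splats lane `LANE` across the vector before the multiply.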
/// Multiply
@@ -17151,8 +17151,8 @@ pub unsafe fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int1
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17166,8 +17166,8 @@ pub unsafe fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17181,8 +17181,8 @@ pub unsafe fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
- simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17196,8 +17196,8 @@ pub unsafe fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> in
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17211,8 +17211,8 @@ pub unsafe fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int3
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17226,8 +17226,8 @@ pub unsafe fn vmul_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17241,8 +17241,8 @@ pub unsafe fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17256,8 +17256,8 @@ pub unsafe fn vmulq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> in
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17271,8 +17271,8 @@ pub unsafe fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> ui
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t {
- static_assert_imm3!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17286,8 +17286,8 @@ pub unsafe fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> u
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17301,8 +17301,8 @@ pub unsafe fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(LANE);
- simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17316,8 +17316,8 @@ pub unsafe fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17331,8 +17331,8 @@ pub unsafe fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> ui
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17346,8 +17346,8 @@ pub unsafe fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Multiply
@@ -17361,8 +17361,8 @@ pub unsafe fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> u
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply
@@ -17376,8 +17376,8 @@ pub unsafe fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply
@@ -17391,8 +17391,8 @@ pub unsafe fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point multiply
@@ -17406,8 +17406,8 @@ pub unsafe fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
- static_assert_imm1!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Floating-point multiply
@@ -17421,8 +17421,8 @@ pub unsafe fn vmulq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmulq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
- simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
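
For orientation, a hypothetical caller of one of the intrinsics migrated above (function name and cfg are illustrative, not part of this diff):

#[cfg(target_arch = "aarch64")]
unsafe fn scale_by_lane1(
    a: core::arch::aarch64::float32x4_t,
    b: core::arch::aarch64::float32x2_t,
) -> core::arch::aarch64::float32x4_t {
    use core::arch::aarch64::vmulq_lane_f32;
    // LANE = 1 satisfies static_assert_uimm_bits!(LANE, 1); LANE = 2 would not compile.
    vmulq_lane_f32::<1>(a, b)
}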
/// Signed multiply long
@@ -17621,8 +17621,8 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- vmull_s16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17636,8 +17636,8 @@ pub unsafe fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
- static_assert_imm3!(LANE);
- vmull_s16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmull_s16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17651,8 +17651,8 @@ pub unsafe fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> in
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
- vmull_s32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17666,8 +17666,8 @@ pub unsafe fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
- static_assert_imm2!(LANE);
- vmull_s32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_s32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17681,8 +17681,8 @@ pub unsafe fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> in
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
- vmull_u16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17696,8 +17696,8 @@ pub unsafe fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> u
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t {
- static_assert_imm3!(LANE);
- vmull_u16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 3);
+ vmull_u16(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17711,8 +17711,8 @@ pub unsafe fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
- vmull_u32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 1);
+ vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Vector long multiply by scalar
@@ -17726,8 +17726,8 @@ pub unsafe fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> u
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmull_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint64x2_t {
- static_assert_imm2!(LANE);
- vmull_u32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
+ static_assert_uimm_bits!(LANE, 2);
+ vmull_u32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32]))
}
/// Floating-point fused Multiply-Add to accumulator (vector)
@@ -18268,7 +18268,7 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
let d: int8x8_t = vsubhn_s16(b, c);
- simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Subtract returning high narrow
@@ -18282,7 +18282,7 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
let d: int16x4_t = vsubhn_s32(b, c);
- simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Subtract returning high narrow
@@ -18296,7 +18296,7 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
let d: int32x2_t = vsubhn_s64(b, c);
- simd_shuffle4!(a, d, [0, 1, 2, 3])
+ simd_shuffle!(a, d, [0, 1, 2, 3])
}
/// Subtract returning high narrow
@@ -18310,7 +18310,7 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
let d: uint8x8_t = vsubhn_u16(b, c);
- simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Subtract returning high narrow
@@ -18324,7 +18324,7 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
let d: uint16x4_t = vsubhn_u32(b, c);
- simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Subtract returning high narrow
@@ -18338,7 +18338,7 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
let d: uint32x2_t = vsubhn_u64(b, c);
- simd_shuffle4!(a, d, [0, 1, 2, 3])
+ simd_shuffle!(a, d, [0, 1, 2, 3])
}
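
A note on the `vsubhn_high_*` shuffles above: `simd_shuffle!` indexes the concatenation of its two inputs, so with two 8-lane inputs the indices `0..=7` select all of `a` and `8..=15` select all of the narrowed difference `d` (conceptually `out[i] = if i < 8 { a[i] } else { d[i - 8] }`). The 16-entry index array alone fixes the `int8x16_t` result type, which is why no length suffix is needed.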
/// Signed halving subtract
@@ -19439,8 +19439,8 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
- static_assert_imm2!(N);
- let b: int16x4_t = simd_shuffle4!(b, b, <const N: i32> [N as u32, N as u32, N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 2);
+ let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
vqdmull_s16(a, b)
}
@@ -19455,8 +19455,8 @@ pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int3
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t {
- static_assert_imm1!(N);
- let b: int32x2_t = simd_shuffle2!(b, b, <const N: i32> [N as u32, N as u32]);
+ static_assert_uimm_bits!(N, 1);
+ let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
vqdmull_s32(a, b)
}
@@ -19523,7 +19523,7 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqaddq_s32(a, vqdmull_lane_s16::<N>(b, c))
}
@@ -19538,7 +19538,7 @@ pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
vqaddq_s64(a, vqdmull_lane_s32::<N>(b, c))
}
@@ -19605,7 +19605,7 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
- static_assert_imm2!(N);
+ static_assert_uimm_bits!(N, 2);
vqsubq_s32(a, vqdmull_lane_s16::<N>(b, c))
}
@@ -19620,7 +19620,7 @@ pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1
#[rustc_legacy_const_generics(3)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
- static_assert_imm1!(N);
+ static_assert_uimm_bits!(N, 1);
vqsubq_s64(a, vqdmull_lane_s32::<N>(b, c))
}
@@ -19767,7 +19767,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32)))
}
@@ -19782,7 +19782,7 @@ pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32)))
}
@@ -19797,7 +19797,7 @@ pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32)))
}
@@ -19812,7 +19812,7 @@ pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32)))
}
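
The `vqdmulh*_laneq_*` bodies above use the other broadcast idiom, `vdup*_n_*(simd_extract(b, LANE as u32))`, extracting one lane and splatting it rather than shuffling; the two idioms are equivalent for this purpose, and only the assertion macro changes in these hunks.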
@@ -20126,8 +20126,8 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
- let b: int16x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulh_s16(a, b)
}
@@ -20142,8 +20142,8 @@ pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
- static_assert_imm3!(LANE);
- let b: int16x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 3);
+ let b: int16x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulh_s16(a, b)
}
@@ -20158,8 +20158,8 @@ pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
- static_assert_imm2!(LANE);
- let b: int16x8_t = simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let b: int16x8_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s16(a, b)
}
@@ -20174,8 +20174,8 @@ pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
- let b: int16x8_t = simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 3);
+ let b: int16x8_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s16(a, b)
}
@@ -20190,8 +20190,8 @@ pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
- let b: int32x2_t = simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 1);
+ let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]);
vqrdmulh_s32(a, b)
}
@@ -20206,8 +20206,8 @@ pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
- static_assert_imm2!(LANE);
- let b: int32x2_t = simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let b: int32x2_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32]);
vqrdmulh_s32(a, b)
}
@@ -20222,8 +20222,8 @@ pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
- static_assert_imm1!(LANE);
- let b: int32x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 1);
+ let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s32(a, b)
}
@@ -20238,8 +20238,8 @@ pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) ->
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
- let b: int32x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
+ static_assert_uimm_bits!(LANE, 2);
+ let b: int32x4_t = simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
vqrdmulhq_s32(a, b)
}
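
The saturating rounding shift hunks that follow migrate `static_assert!` itself, from the old `$expr : $ty where $cond` grammar to a plain boolean condition; the type annotation was redundant because `N` is already declared as `const N: i32`. Before and after:

// old: static_assert!(N : i32 where N >= 1 && N <= 8);
// new: static_assert!(N >= 1 && N <= 8);

For these narrowing shifts the amount must be at least 1 and at most the destination element width, hence the `1..=8`, `1..=16`, and `1..=32` bounds below.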
@@ -20556,7 +20556,7 @@ vqrshlq_u64_(a, b)
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v8i8")]
@@ -20575,7 +20575,7 @@ vqrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v8i8")]
@@ -20593,7 +20593,7 @@ vqrshrn_n_s16_(a, N)
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v4i16")]
@@ -20612,7 +20612,7 @@ vqrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v4i16")]
@@ -20630,7 +20630,7 @@ vqrshrn_n_s32_(a, N)
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftns.v2i32")]
@@ -20649,7 +20649,7 @@ vqrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrn.v2i32")]
@@ -20667,7 +20667,7 @@ vqrshrn_n_s64_(a, N)
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v8i8")]
@@ -20686,7 +20686,7 @@ vqrshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v8i8")]
@@ -20704,7 +20704,7 @@ vqrshrn_n_u16_(a, N)
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v4i16")]
@@ -20723,7 +20723,7 @@ vqrshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v4i16")]
@@ -20741,7 +20741,7 @@ vqrshrn_n_u32_(a, N)
#[cfg_attr(test, assert_instr(vqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnu.v2i32")]
@@ -20760,7 +20760,7 @@ vqrshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqrshrn.v2i32")]
@@ -20778,7 +20778,7 @@ vqrshrn_n_u64_(a, N)
#[cfg_attr(test, assert_instr(vqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v8i8")]
@@ -20797,7 +20797,7 @@ vqrshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v8i8")]
@@ -20815,7 +20815,7 @@ vqrshrun_n_s16_(a, N)
#[cfg_attr(test, assert_instr(vqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v4i16")]
@@ -20834,7 +20834,7 @@ vqrshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v4i16")]
@@ -20852,7 +20852,7 @@ vqrshrun_n_s32_(a, N)
#[cfg_attr(test, assert_instr(vqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqrshiftnsu.v2i32")]
@@ -20871,7 +20871,7 @@ vqrshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqrshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqrshrun.v2i32")]
@@ -21195,7 +21195,7 @@ vqshlq_u64_(a, b)
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqshl_s8(a, vdup_n_s8(N as _))
}
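
In the `vqshl_n_*` family the bit budget handed to `static_assert_uimm_bits!` tracks the element width: 3 bits for 8-bit lanes (shifts `0..=7`), 4 for 16-bit, 5 for 32-bit, and 6 for 64-bit, since shifting a lane by its full width is never valid. A hypothetical caller (illustrative, not part of this diff):

#[cfg(target_arch = "aarch64")]
unsafe fn shift_left_3(a: core::arch::aarch64::int8x8_t) -> core::arch::aarch64::int8x8_t {
    use core::arch::aarch64::vqshl_n_s8;
    // N = 3 is within 0..=7; N = 8 would be rejected at compile time.
    vqshl_n_s8::<3>(a)
}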
@@ -21210,7 +21210,7 @@ pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqshlq_s8(a, vdupq_n_s8(N as _))
}
@@ -21225,7 +21225,7 @@ pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
vqshl_s16(a, vdup_n_s16(N as _))
}
@@ -21240,7 +21240,7 @@ pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
vqshlq_s16(a, vdupq_n_s16(N as _))
}
@@ -21255,7 +21255,7 @@ pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
vqshl_s32(a, vdup_n_s32(N as _))
}
@@ -21270,7 +21270,7 @@ pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
vqshlq_s32(a, vdupq_n_s32(N as _))
}
@@ -21285,7 +21285,7 @@ pub unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
vqshl_s64(a, vdup_n_s64(N as _))
}
@@ -21300,7 +21300,7 @@ pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
vqshlq_s64(a, vdupq_n_s64(N as _))
}
@@ -21315,7 +21315,7 @@ pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqshl_u8(a, vdup_n_s8(N as _))
}
@@ -21330,7 +21330,7 @@ pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
vqshlq_u8(a, vdupq_n_s8(N as _))
}
@@ -21345,7 +21345,7 @@ pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
vqshl_u16(a, vdup_n_s16(N as _))
}
@@ -21360,7 +21360,7 @@ pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
vqshlq_u16(a, vdupq_n_s16(N as _))
}
@@ -21375,7 +21375,7 @@ pub unsafe fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
vqshl_u32(a, vdup_n_s32(N as _))
}
@@ -21390,7 +21390,7 @@ pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
vqshlq_u32(a, vdupq_n_s32(N as _))
}
@@ -21405,7 +21405,7 @@ pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
vqshl_u64(a, vdup_n_s64(N as _))
}
@@ -21420,7 +21420,7 @@ pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
vqshlq_u64(a, vdupq_n_s64(N as _))
}
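
The `vqshlu_n_*` hunks below are the signed-to-unsigned variant of the same shifts: signed input lanes, unsigned saturating output, so negative lanes clamp to zero. The `N` bounds mirror `vqshl_n_*`. A hypothetical caller (illustrative):

#[cfg(target_arch = "aarch64")]
unsafe fn shift_unsigned(a: core::arch::aarch64::int8x8_t) -> core::arch::aarch64::uint8x8_t {
    use core::arch::aarch64::vqshlu_n_s8;
    vqshlu_n_s8::<2>(a) // negative lanes become 0; large positives saturate to 255
}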
@@ -21433,7 +21433,7 @@ pub unsafe fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i8")]
@@ -21452,7 +21452,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s8<const N: i32>(a: int8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i8")]
@@ -21470,7 +21470,7 @@ vqshlu_n_s8_(a, int8x8_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8, N
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i16")]
@@ -21489,7 +21489,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s16<const N: i32>(a: int16x4_t) -> uint16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i16")]
@@ -21507,7 +21507,7 @@ vqshlu_n_s16_(a, int16x4_t(N as i16, N as i16, N as i16, N as i16))
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i32")]
@@ -21526,7 +21526,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s32<const N: i32>(a: int32x2_t) -> uint32x2_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i32")]
@@ -21544,7 +21544,7 @@ vqshlu_n_s32_(a, int32x2_t(N as i32, N as i32))
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v1i64")]
@@ -21563,7 +21563,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshlu_n_s64<const N: i32>(a: int64x1_t) -> uint64x1_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v1i64")]
@@ -21581,7 +21581,7 @@ vqshlu_n_s64_(a, int64x1_t(N as i64))
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v16i8")]
@@ -21600,7 +21600,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8,
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s8<const N: i32>(a: int8x16_t) -> uint8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v16i8")]
@@ -21618,7 +21618,7 @@ vqshluq_n_s8_(a, int8x16_t(N as i8, N as i8, N as i8, N as i8, N as i8, N as i8,
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v8i16")]
@@ -21637,7 +21637,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s16<const N: i32>(a: int16x8_t) -> uint16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v8i16")]
@@ -21655,7 +21655,7 @@ vqshluq_n_s16_(a, int16x8_t(N as i16, N as i16, N as i16, N as i16, N as i16, N
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v4i32")]
@@ -21674,7 +21674,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s32<const N: i32>(a: int32x4_t) -> uint32x4_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v4i32")]
@@ -21692,7 +21692,7 @@ vqshluq_n_s32_(a, int32x4_t(N as i32, N as i32, N as i32, N as i32))
#[cfg_attr(test, assert_instr(vqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftsu.v2i64")]
@@ -21711,7 +21711,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshluq_n_s64<const N: i32>(a: int64x2_t) -> uint64x2_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshlu.v2i64")]
@@ -21729,7 +21729,7 @@ vqshluq_n_s64_(a, int64x2_t(N as i64, N as i64))
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v8i8")]
@@ -21748,7 +21748,7 @@ vqshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v8i8")]
@@ -21766,7 +21766,7 @@ vqshrn_n_s16_(a, N)
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v4i16")]
@@ -21785,7 +21785,7 @@ vqshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v4i16")]
@@ -21803,7 +21803,7 @@ vqshrn_n_s32_(a, N)
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftns.v2i32")]
@@ -21822,7 +21822,7 @@ vqshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrn.v2i32")]
@@ -21840,7 +21840,7 @@ vqshrn_n_s64_(a, N)
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v8i8")]
@@ -21859,7 +21859,7 @@ vqshrn_n_u16_(a, uint16x8_t(-N as u16, -N as u16, -N as u16, -N as u16, -N as u1
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v8i8")]
@@ -21877,7 +21877,7 @@ vqshrn_n_u16_(a, N)
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v4i16")]
@@ -21896,7 +21896,7 @@ vqshrn_n_u32_(a, uint32x4_t(-N as u32, -N as u32, -N as u32, -N as u32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v4i16")]
@@ -21914,7 +21914,7 @@ vqshrn_n_u32_(a, N)
#[cfg_attr(test, assert_instr(vqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnu.v2i32")]
@@ -21933,7 +21933,7 @@ vqshrn_n_u64_(a, uint64x2_t(-N as u64, -N as u64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.uqshrn.v2i32")]
@@ -21951,7 +21951,7 @@ vqshrn_n_u64_(a, N)
#[cfg_attr(test, assert_instr(vqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v8i8")]
@@ -21970,7 +21970,7 @@ vqshrun_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i1
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_n_s16<const N: i32>(a: int16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v8i8")]
@@ -21988,7 +21988,7 @@ vqshrun_n_s16_(a, N)
#[cfg_attr(test, assert_instr(vqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v4i16")]
@@ -22007,7 +22007,7 @@ vqshrun_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_n_s32<const N: i32>(a: int32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v4i16")]
@@ -22025,7 +22025,7 @@ vqshrun_n_s32_(a, N)
#[cfg_attr(test, assert_instr(vqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vqshiftnsu.v2i32")]
@@ -22044,7 +22044,7 @@ vqshrun_n_s64_(a, int64x2_t(-N as i64, -N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vqshrun_n_s64<const N: i32>(a: int64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.sqshrun.v2i32")]
@@ -23196,7 +23196,7 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
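// Another recurring change in these vreinterpret hunks: `aes` disappears
// from the arm-specific attribute because the outer #[target_feature] line
// already enables `aes` on every architecture; only the arm-only `v8`
// feature still needs the cfg_attr. An illustrative attribute stack
// (hypothetical function name; body mirrors the vreinterpret pattern and
// assumes the surrounding file's transmute import and vector types):
#[target_feature(enable = "neon,aes")] // enabled on arm and aarch64 alike
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] // arm extra
unsafe fn reinterpret_example(a: poly64x1_t) -> int32x2_t {
    transmute(a) // vreinterpret_* is a no-op bit reinterpretation
}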
@@ -23209,7 +23209,7 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23222,7 +23222,7 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23235,7 +23235,7 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23248,7 +23248,7 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23261,7 +23261,7 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23274,7 +23274,7 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23781,7 +23781,7 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23794,7 +23794,7 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23807,7 +23807,7 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23820,7 +23820,7 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23833,7 +23833,7 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23846,7 +23846,7 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -23859,7 +23859,7 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24184,7 +24184,7 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24197,7 +24197,7 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24210,7 +24210,7 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24223,7 +24223,7 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24236,7 +24236,7 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24249,7 +24249,7 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24262,7 +24262,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24275,7 +24275,7 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24600,7 +24600,7 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24613,7 +24613,7 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24626,7 +24626,7 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24639,7 +24639,7 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24652,7 +24652,7 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24665,7 +24665,7 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24678,7 +24678,7 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24691,7 +24691,7 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24860,7 +24860,7 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24873,7 +24873,7 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24886,7 +24886,7 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24899,7 +24899,7 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24912,7 +24912,7 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24925,7 +24925,7 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24938,7 +24938,7 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24951,7 +24951,7 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -24964,7 +24964,7 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25133,7 +25133,7 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25146,7 +25146,7 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25159,7 +25159,7 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25172,7 +25172,7 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25185,7 +25185,7 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25198,7 +25198,7 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25211,7 +25211,7 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25224,7 +25224,7 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25237,7 +25237,7 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25250,7 +25250,7 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25263,7 +25263,7 @@ pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25276,7 +25276,7 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25289,7 +25289,7 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25302,7 +25302,7 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -25315,7 +25315,7 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t {
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
@@ -26184,7 +26184,7 @@ vrshlq_u64_(a, b)
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
vrshl_s8(a, vdup_n_s8((-N) as _))
}
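// The context line above shows the implementation trick for the whole
// vrshr/vrshrq family: a rounding shift right by N is a rounding shift
// left by -N, so vrshr_n_s8 forwards to vrshl_s8 with a splatted -N.
// A scalar model of one lane, for intuition (a sketch assuming VRSHR adds
// the rounding bias before shifting, per the Arm pseudocode; not the
// intrinsic itself):
fn rshr_scalar(a: i8, n: u32) -> i8 {
    debug_assert!((1..=8).contains(&n));
    let widened = a as i16;     // widen so adding the bias cannot overflow
    let bias = 1i16 << (n - 1); // half the weight of the dropped bits
    ((widened + bias) >> n) as i8
}
// e.g. rshr_scalar(3, 1) == 2 and rshr_scalar(-3, 1) == -1: the halfway
// cases round toward positive infinity, matching SRSHR/VRSHR.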
@@ -26199,7 +26199,7 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
vrshlq_s8(a, vdupq_n_s8((-N) as _))
}
@@ -26214,7 +26214,7 @@ pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
vrshl_s16(a, vdup_n_s16((-N) as _))
}
@@ -26229,7 +26229,7 @@ pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
vrshlq_s16(a, vdupq_n_s16((-N) as _))
}
@@ -26244,7 +26244,7 @@ pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
vrshl_s32(a, vdup_n_s32((-N) as _))
}
@@ -26259,7 +26259,7 @@ pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
vrshlq_s32(a, vdupq_n_s32((-N) as _))
}
@@ -26274,7 +26274,7 @@ pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vrshl_s64(a, vdup_n_s64((-N) as _))
}
@@ -26289,7 +26289,7 @@ pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vrshlq_s64(a, vdupq_n_s64((-N) as _))
}
@@ -26304,7 +26304,7 @@ pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
vrshl_u8(a, vdup_n_s8((-N) as _))
}
@@ -26319,7 +26319,7 @@ pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
vrshlq_u8(a, vdupq_n_s8((-N) as _))
}
@@ -26334,7 +26334,7 @@ pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
vrshl_u16(a, vdup_n_s16((-N) as _))
}
@@ -26349,7 +26349,7 @@ pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
vrshlq_u16(a, vdupq_n_s16((-N) as _))
}
@@ -26364,7 +26364,7 @@ pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
vrshl_u32(a, vdup_n_s32((-N) as _))
}
@@ -26379,7 +26379,7 @@ pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
vrshlq_u32(a, vdupq_n_s32((-N) as _))
}
@@ -26394,7 +26394,7 @@ pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vrshl_u64(a, vdup_n_s64((-N) as _))
}
@@ -26409,7 +26409,7 @@ pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
vrshlq_u64(a, vdupq_n_s64((-N) as _))
}
@@ -26422,7 +26422,7 @@ pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
#[cfg_attr(test, assert_instr(vrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v8i8")]
@@ -26441,7 +26441,7 @@ vrshrn_n_s16_(a, int16x8_t(-N as i16, -N as i16, -N as i16, -N as i16, -N as i16
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v8i8")]
@@ -26459,7 +26459,7 @@ vrshrn_n_s16_(a, N)
#[cfg_attr(test, assert_instr(vrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v4i16")]
@@ -26478,7 +26478,7 @@ vrshrn_n_s32_(a, int32x4_t(-N as i32, -N as i32, -N as i32, -N as i32))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v4i16")]
@@ -26496,7 +26496,7 @@ vrshrn_n_s32_(a, N)
#[cfg_attr(test, assert_instr(vrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vrshiftn.v2i32")]
@@ -26515,7 +26515,7 @@ vrshrn_n_s64_(a, int64x2_t(-N as i64, -N as i64))
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vrshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
#[allow(improper_ctypes)]
extern "unadjusted" {
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.neon.rshrn.v2i32")]
@@ -26535,7 +26535,7 @@ vrshrn_n_s64_(a, N)
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
transmute(vrshrn_n_s16::<N>(transmute(a)))
}
@@ -26550,7 +26550,7 @@ pub unsafe fn vrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
transmute(vrshrn_n_s32::<N>(transmute(a)))
}
@@ -26565,7 +26565,7 @@ pub unsafe fn vrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
transmute(vrshrn_n_s64::<N>(transmute(a)))
}
@@ -26580,7 +26580,7 @@ pub unsafe fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vrshr_n_s8::<N>(b))
}
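// The vrsra accumulate variants in the hunks below compose that rounding
// shift with a wrapping lane-wise add (simd_add). Per-lane identity,
// reusing the scalar model sketched earlier:
fn rsra_scalar(a: i8, b: i8, n: u32) -> i8 {
    // vrsra_n_s8::<N>(a, b) yields a[i] + (b[i] rounding-shifted right by N)
    a.wrapping_add(rshr_scalar(b, n))
}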
@@ -26595,7 +26595,7 @@ pub unsafe fn vrsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vrshrq_n_s8::<N>(b))
}
@@ -26610,7 +26610,7 @@ pub unsafe fn vrsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vrshr_n_s16::<N>(b))
}
@@ -26625,7 +26625,7 @@ pub unsafe fn vrsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vrshrq_n_s16::<N>(b))
}
@@ -26640,7 +26640,7 @@ pub unsafe fn vrsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vrshr_n_s32::<N>(b))
}
@@ -26655,7 +26655,7 @@ pub unsafe fn vrsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vrshrq_n_s32::<N>(b))
}
@@ -26670,7 +26670,7 @@ pub unsafe fn vrsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vrshr_n_s64::<N>(b))
}
@@ -26685,7 +26685,7 @@ pub unsafe fn vrsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vrshrq_n_s64::<N>(b))
}
@@ -26700,7 +26700,7 @@ pub unsafe fn vrsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vrshr_n_u8::<N>(b))
}
@@ -26715,7 +26715,7 @@ pub unsafe fn vrsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vrshrq_n_u8::<N>(b))
}
@@ -26730,7 +26730,7 @@ pub unsafe fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x1
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vrshr_n_u16::<N>(b))
}
@@ -26745,7 +26745,7 @@ pub unsafe fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vrshrq_n_u16::<N>(b))
}
@@ -26760,7 +26760,7 @@ pub unsafe fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vrshr_n_u32::<N>(b))
}
@@ -26775,7 +26775,7 @@ pub unsafe fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vrshrq_n_u32::<N>(b))
}
@@ -26790,7 +26790,7 @@ pub unsafe fn vrsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vrshr_n_u64::<N>(b))
}
@@ -26805,7 +26805,7 @@ pub unsafe fn vrsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vrsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vrshrq_n_u64::<N>(b))
}
@@ -26916,7 +26916,7 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s8<const LANE: i32>(a: i8, b: int8x8_t) -> int8x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(b, LANE as u32, a)
}
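// From here on the lane-index assertions switch from the per-width
// static_assert_imm1!/imm2!/imm3!/imm4! family to a single parameterized
// macro: static_assert_uimm_bits!(LANE, B) asserts that LANE fits in B
// unsigned bits, i.e. 0 <= LANE < (1 << B). A plausible definition in
// terms of the simplified static_assert! (an assumption; the real macro
// may differ):
macro_rules! static_assert_uimm_bits {
    ($imm:ident, $bits:expr) => {
        static_assert!($imm >= 0 && $imm < (1 << $bits));
    };
}
// So vset_lane_s8 (8 lanes) asserts 3 bits, vset_lane_s16 (4 lanes)
// asserts 2 bits, and vsetq_lane_s8 (16 lanes) asserts 4 bits.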
@@ -26931,7 +26931,7 @@ pub unsafe fn vset_lane_s8<const LANE: i32>(a: i8, b: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(b, LANE as u32, a)
}
@@ -26946,7 +26946,7 @@ pub unsafe fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -26961,7 +26961,7 @@ pub unsafe fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(b, LANE as u32, a)
}
@@ -26976,7 +26976,7 @@ pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(b, LANE as u32, a)
}
@@ -26991,7 +26991,7 @@ pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(b, LANE as u32, a)
}
@@ -27006,7 +27006,7 @@ pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -27021,7 +27021,7 @@ pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(b, LANE as u32, a)
}
@@ -27036,7 +27036,7 @@ pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(b, LANE as u32, a)
}
@@ -27051,7 +27051,7 @@ pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(b, LANE as u32, a)
}
@@ -27060,13 +27060,13 @@ pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(b, LANE as u32, a)
}
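
Dropping "aes" from the arm-specific attribute appears to rely on `#[target_feature]` attributes being additive: the unconditional `enable = "neon,aes"` line above already turns the feature on, so the cfg_attr-gated line only needs to add "v8". A sketch of the additive behavior (hedged, shown on aarch64 where both features are stable):

#[cfg(target_arch = "aarch64")]
#[target_feature(enable = "neon")]
#[target_feature(enable = "aes")]
unsafe fn both_features_enabled() {
    // Both "neon" and "aes" are in effect here; neither attribute
    // needs to repeat the other's list.
}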
@@ -27081,7 +27081,7 @@ pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
simd_insert(b, LANE as u32, a)
}
@@ -27096,7 +27096,7 @@ pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(b, LANE as u32, a)
}
@@ -27111,7 +27111,7 @@ pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(b, LANE as u32, a)
}
@@ -27126,7 +27126,7 @@ pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -27141,7 +27141,7 @@ pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
simd_insert(b, LANE as u32, a)
}
@@ -27156,7 +27156,7 @@ pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(b, LANE as u32, a)
}
@@ -27171,7 +27171,7 @@ pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(b, LANE as u32, a)
}
@@ -27186,7 +27186,7 @@ pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_u64<const LANE: i32>(a: u64, b: uint64x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -27201,7 +27201,7 @@ pub unsafe fn vsetq_lane_u64<const LANE: i32>(a: u64, b: uint64x2_t) -> uint64x2
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
simd_insert(b, LANE as u32, a)
}
@@ -27216,7 +27216,7 @@ pub unsafe fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(b, LANE as u32, a)
}
@@ -27225,13 +27225,13 @@ pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)
#[inline]
#[target_feature(enable = "neon,aes")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -27246,7 +27246,7 @@ pub unsafe fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(b, LANE as u32, a)
}
@@ -27261,7 +27261,7 @@ pub unsafe fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsetq_lane_f32<const LANE: i32>(a: f32, b: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(b, LANE as u32, a)
}
@@ -27580,7 +27580,7 @@ vshlq_u64_(a, b)
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_shl(a, vdup_n_s8(N as _))
}
@@ -27595,7 +27595,7 @@ pub unsafe fn vshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_shl(a, vdupq_n_s8(N as _))
}
@@ -27610,7 +27610,7 @@ pub unsafe fn vshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_shl(a, vdup_n_s16(N as _))
}
@@ -27625,7 +27625,7 @@ pub unsafe fn vshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_shl(a, vdupq_n_s16(N as _))
}
@@ -27640,7 +27640,7 @@ pub unsafe fn vshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_shl(a, vdup_n_s32(N as _))
}
@@ -27655,7 +27655,7 @@ pub unsafe fn vshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_shl(a, vdupq_n_s32(N as _))
}
@@ -27670,7 +27670,7 @@ pub unsafe fn vshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_shl(a, vdup_n_u8(N as _))
}
@@ -27685,7 +27685,7 @@ pub unsafe fn vshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
- static_assert_imm3!(N);
+ static_assert_uimm_bits!(N, 3);
simd_shl(a, vdupq_n_u8(N as _))
}
@@ -27700,7 +27700,7 @@ pub unsafe fn vshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_shl(a, vdup_n_u16(N as _))
}
@@ -27715,7 +27715,7 @@ pub unsafe fn vshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
- static_assert_imm4!(N);
+ static_assert_uimm_bits!(N, 4);
simd_shl(a, vdupq_n_u16(N as _))
}
@@ -27730,7 +27730,7 @@ pub unsafe fn vshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_shl(a, vdup_n_u32(N as _))
}
@@ -27745,7 +27745,7 @@ pub unsafe fn vshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
- static_assert_imm5!(N);
+ static_assert_uimm_bits!(N, 5);
simd_shl(a, vdupq_n_u32(N as _))
}
@@ -27760,7 +27760,7 @@ pub unsafe fn vshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_shl(a, vdup_n_s64(N as _))
}
@@ -27775,7 +27775,7 @@ pub unsafe fn vshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_shl(a, vdupq_n_s64(N as _))
}
@@ -27790,7 +27790,7 @@ pub unsafe fn vshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_shl(a, vdup_n_u64(N as _))
}
@@ -27805,7 +27805,7 @@ pub unsafe fn vshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
- static_assert_imm6!(N);
+ static_assert_uimm_bits!(N, 6);
simd_shl(a, vdupq_n_u64(N as _))
}
@@ -27820,7 +27820,7 @@ pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 0 && N <= 8);
+ static_assert!(N >= 0 && N <= 8);
simd_shl(simd_cast(a), vdupq_n_s16(N as _))
}
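
Note the inclusive upper bound: because vshll widens each lane before shifting (i8 lanes become i16), a shift by the full source width is representable, so N ranges over 0..=8 rather than 0..=7. A scalar analogue:

fn shll_demo() {
    let x: i8 = -3;
    // Widen first, then shift: a count equal to the source width is fine.
    let widened = (x as i16) << 8;
    assert_eq!(widened, -768);
}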
@@ -27835,7 +27835,7 @@ pub unsafe fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_s16<const N: i32>(a: int16x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 0 && N <= 16);
+ static_assert!(N >= 0 && N <= 16);
simd_shl(simd_cast(a), vdupq_n_s32(N as _))
}
@@ -27850,7 +27850,7 @@ pub unsafe fn vshll_n_s16<const N: i32>(a: int16x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 0 && N <= 32);
+ static_assert!(N >= 0 && N <= 32);
simd_shl(simd_cast(a), vdupq_n_s64(N as _))
}
@@ -27865,7 +27865,7 @@ pub unsafe fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 0 && N <= 8);
+ static_assert!(N >= 0 && N <= 8);
simd_shl(simd_cast(a), vdupq_n_u16(N as _))
}
@@ -27880,7 +27880,7 @@ pub unsafe fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_u16<const N: i32>(a: uint16x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 0 && N <= 16);
+ static_assert!(N >= 0 && N <= 16);
simd_shl(simd_cast(a), vdupq_n_u32(N as _))
}
@@ -27895,7 +27895,7 @@ pub unsafe fn vshll_n_u16<const N: i32>(a: uint16x4_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 0 && N <= 32);
+ static_assert!(N >= 0 && N <= 32);
simd_shl(simd_cast(a), vdupq_n_u64(N as _))
}
@@ -27910,7 +27910,7 @@ pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let n: i32 = if N == 8 { 7 } else { N };
simd_shr(a, vdup_n_s8(n as _))
}
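
For the signed right shifts, N == width is accepted but clamped to width - 1: an arithmetic shift by 7 already broadcasts the sign bit across an i8, so the clamp keeps the shift count representable without changing the result. A scalar analogue:

fn sshr_demo() {
    let neg: i8 = -100;
    let pos: i8 = 100;
    assert_eq!(neg >> 7, -1); // all sign bits
    assert_eq!(pos >> 7, 0);
}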
@@ -27926,7 +27926,7 @@ pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let n: i32 = if N == 8 { 7 } else { N };
simd_shr(a, vdupq_n_s8(n as _))
}
@@ -27942,7 +27942,7 @@ pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let n: i32 = if N == 16 { 15 } else { N };
simd_shr(a, vdup_n_s16(n as _))
}
@@ -27958,7 +27958,7 @@ pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let n: i32 = if N == 16 { 15 } else { N };
simd_shr(a, vdupq_n_s16(n as _))
}
@@ -27974,7 +27974,7 @@ pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let n: i32 = if N == 32 { 31 } else { N };
simd_shr(a, vdup_n_s32(n as _))
}
@@ -27990,7 +27990,7 @@ pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let n: i32 = if N == 32 { 31 } else { N };
simd_shr(a, vdupq_n_s32(n as _))
}
@@ -28006,7 +28006,7 @@ pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let n: i32 = if N == 64 { 63 } else { N };
simd_shr(a, vdup_n_s64(n as _))
}
@@ -28022,7 +28022,7 @@ pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let n: i32 = if N == 64 { 63 } else { N };
simd_shr(a, vdupq_n_s64(n as _))
}
@@ -28038,7 +28038,7 @@ pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N };
simd_shr(a, vdup_n_u8(n as _))
}
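
The unsigned variants take a different route for N == width: a logical right shift by the full lane width is not a valid shift count (Rust's scalar shifts reject it too), and its mathematical result is zero, so the intrinsic returns an all-zero vector instead of emitting the shift. A scalar analogue:

fn ushr_demo() {
    let x: u8 = 0xff;
    assert_eq!(x.checked_shr(8), None); // shift count == width is rejected
    assert_eq!(((x as u16) >> 8) as u8, 0); // the expected value is zero anyway
}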
@@ -28054,7 +28054,7 @@ pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N };
simd_shr(a, vdupq_n_u8(n as _))
}
@@ -28070,7 +28070,7 @@ pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N };
simd_shr(a, vdup_n_u16(n as _))
}
@@ -28086,7 +28086,7 @@ pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N };
simd_shr(a, vdupq_n_u16(n as _))
}
@@ -28102,7 +28102,7 @@ pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N };
simd_shr(a, vdup_n_u32(n as _))
}
@@ -28118,7 +28118,7 @@ pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N };
simd_shr(a, vdupq_n_u32(n as _))
}
@@ -28134,7 +28134,7 @@ pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N };
simd_shr(a, vdup_n_u64(n as _))
}
@@ -28150,7 +28150,7 @@ pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N };
simd_shr(a, vdupq_n_u64(n as _))
}
@@ -28166,7 +28166,7 @@ pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_cast(simd_shr(a, vdupq_n_s16(N as _)))
}
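
For the narrowing shifts the bound follows the result width: a 16-bit lane is shifted and then truncated to 8 bits, so N ranges over 1..=8 even though the source lanes are 16 bits wide. A scalar analogue:

fn shrn_demo() {
    let x: i16 = 0x1234;
    assert_eq!((x >> 4) as i8, 0x23); // shift, then narrow
}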
@@ -28181,7 +28181,7 @@ pub unsafe fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_cast(simd_shr(a, vdupq_n_s32(N as _)))
}
@@ -28196,7 +28196,7 @@ pub unsafe fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_cast(simd_shr(a, vdupq_n_s64(N as _)))
}
@@ -28211,7 +28211,7 @@ pub unsafe fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_cast(simd_shr(a, vdupq_n_u16(N as _)))
}
@@ -28226,7 +28226,7 @@ pub unsafe fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_cast(simd_shr(a, vdupq_n_u32(N as _)))
}
@@ -28241,7 +28241,7 @@ pub unsafe fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t {
#[rustc_legacy_const_generics(1)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_cast(simd_shr(a, vdupq_n_u64(N as _)))
}
@@ -28256,7 +28256,7 @@ pub unsafe fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vshr_n_s8::<N>(b))
}
@@ -28271,7 +28271,7 @@ pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vshrq_n_s8::<N>(b))
}
@@ -28286,7 +28286,7 @@ pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vshr_n_s16::<N>(b))
}
@@ -28301,7 +28301,7 @@ pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vshrq_n_s16::<N>(b))
}
@@ -28316,7 +28316,7 @@ pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vshr_n_s32::<N>(b))
}
@@ -28331,7 +28331,7 @@ pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vshrq_n_s32::<N>(b))
}
@@ -28346,7 +28346,7 @@ pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vshr_n_s64::<N>(b))
}
@@ -28361,7 +28361,7 @@ pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vshrq_n_s64::<N>(b))
}
@@ -28376,7 +28376,7 @@ pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vshr_n_u8::<N>(b))
}
@@ -28391,7 +28391,7 @@ pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
- static_assert!(N : i32 where N >= 1 && N <= 8);
+ static_assert!(N >= 1 && N <= 8);
simd_add(a, vshrq_n_u8::<N>(b))
}
@@ -28406,7 +28406,7 @@ pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vshr_n_u16::<N>(b))
}
@@ -28421,7 +28421,7 @@ pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
- static_assert!(N : i32 where N >= 1 && N <= 16);
+ static_assert!(N >= 1 && N <= 16);
simd_add(a, vshrq_n_u16::<N>(b))
}
@@ -28436,7 +28436,7 @@ pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vshr_n_u32::<N>(b))
}
@@ -28451,7 +28451,7 @@ pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
- static_assert!(N : i32 where N >= 1 && N <= 32);
+ static_assert!(N >= 1 && N <= 32);
simd_add(a, vshrq_n_u32::<N>(b))
}
@@ -28466,7 +28466,7 @@ pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vshr_n_u64::<N>(b))
}
@@ -28481,7 +28481,7 @@ pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[rustc_legacy_const_generics(2)]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
- static_assert!(N : i32 where N >= 1 && N <= 64);
+ static_assert!(N >= 1 && N <= 64);
simd_add(a, vshrq_n_u64::<N>(b))
}
@@ -28495,8 +28495,8 @@ pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
- let a1: int8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
- let b1: int8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+ let a1: int8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+ let b1: int8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
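
From here on the patch swaps the width-suffixed `simd_shuffleN!` macros for a single `simd_shuffle!` that infers the lane count from the length of the index array; the masks themselves are unchanged. The pair of masks in vtrn_s8 implements an even/odd lane transpose, which can be checked through the public intrinsic (a sketch, assuming an AArch64 target):

#[cfg(target_arch = "aarch64")]
fn trn_demo() {
    use core::arch::aarch64::{vld1_s8, vst1_s8, vtrn_s8};
    unsafe {
        let a = vld1_s8([0i8, 1, 2, 3, 4, 5, 6, 7].as_ptr());
        let b = vld1_s8([8i8, 9, 10, 11, 12, 13, 14, 15].as_ptr());
        let t = vtrn_s8(a, b);
        let mut even = [0i8; 8];
        vst1_s8(even.as_mut_ptr(), t.0);
        assert_eq!(even, [0, 8, 2, 10, 4, 12, 6, 14]); // mask [0, 8, 2, 10, ...]
    }
}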
@@ -28510,8 +28510,8 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
- let a1: int16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
- let b1: int16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
+ let a1: int16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+ let b1: int16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
@@ -28525,8 +28525,8 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
- let a1: int8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
- let b1: int8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
+ let a1: int8x16_t = simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
+ let b1: int8x16_t = simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
transmute((a1, b1))
}
@@ -28540,8 +28540,8 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
- let a1: int16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
- let b1: int16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+ let a1: int16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+ let b1: int16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
@@ -28555,8 +28555,8 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
- let a1: int32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
- let b1: int32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
+ let a1: int32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+ let b1: int32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
@@ -28570,8 +28570,8 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
- let a1: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
- let b1: uint8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+ let a1: uint8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+ let b1: uint8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
@@ -28585,8 +28585,8 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
- let a1: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
- let b1: uint16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
+ let a1: uint16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+ let b1: uint16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
@@ -28600,8 +28600,8 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
- let a1: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
- let b1: uint8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
+ let a1: uint8x16_t = simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
+ let b1: uint8x16_t = simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
transmute((a1, b1))
}
@@ -28615,8 +28615,8 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
- let a1: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
- let b1: uint16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+ let a1: uint16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+ let b1: uint16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
@@ -28630,8 +28630,8 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
- let a1: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
- let b1: uint32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
+ let a1: uint32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+ let b1: uint32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
@@ -28645,8 +28645,8 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
- let a1: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
- let b1: poly8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+ let a1: poly8x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+ let b1: poly8x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
@@ -28660,8 +28660,8 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
- let a1: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
- let b1: poly16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
+ let a1: poly16x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+ let b1: poly16x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
@@ -28675,8 +28675,8 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
- let a1: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
- let b1: poly8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
+ let a1: poly8x16_t = simd_shuffle!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
+ let b1: poly8x16_t = simd_shuffle!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
transmute((a1, b1))
}
@@ -28690,8 +28690,8 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
- let a1: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
- let b1: poly16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
+ let a1: poly16x8_t = simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
+ let b1: poly16x8_t = simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
transmute((a1, b1))
}
@@ -28705,8 +28705,8 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
- let a1: int32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b1: int32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a1: int32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b1: int32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a1, b1))
}
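
For two-lane vectors the transpose degenerates into a zip, which is why the aarch64 test here asserts a zip instruction: masks [0, 2] and [1, 3] pair the first lanes and the last lanes respectively. A scalar sketch:

fn trn2_demo() {
    let a = [10, 11];
    let b = [20, 21];
    assert_eq!([a[0], b[0]], [10, 20]); // mask [0, 2]
    assert_eq!([a[1], b[1]], [11, 21]); // mask [1, 3]
}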
@@ -28720,8 +28720,8 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
- let a1: uint32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b1: uint32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a1: uint32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b1: uint32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a1, b1))
}
@@ -28735,8 +28735,8 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
- let a1: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b1: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a1: float32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b1: float32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a1, b1))
}
@@ -28750,8 +28750,8 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
- let a1: float32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
- let b1: float32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
+ let a1: float32x4_t = simd_shuffle!(a, b, [0, 4, 2, 6]);
+ let b1: float32x4_t = simd_shuffle!(a, b, [1, 5, 3, 7]);
transmute((a1, b1))
}
@@ -28765,8 +28765,8 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
- let a0: int8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
- let b0: int8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
+ let a0: int8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
+ let b0: int8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
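
The zip masks interleave the low halves of the two inputs into the first result and the high halves into the second. A scalar sketch of [0, 8, 1, 9, ...] and [4, 12, 5, 13, ...]:

fn zip_demo() {
    let a = [0, 1, 2, 3, 4, 5, 6, 7];
    let b = [8, 9, 10, 11, 12, 13, 14, 15];
    let lo: Vec<i32> = (0..4).flat_map(|i| [a[i], b[i]]).collect();
    let hi: Vec<i32> = (4..8).flat_map(|i| [a[i], b[i]]).collect();
    assert_eq!(lo, [0, 8, 1, 9, 2, 10, 3, 11]);
    assert_eq!(hi, [4, 12, 5, 13, 6, 14, 7, 15]);
}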
@@ -28780,8 +28780,8 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
- let a0: int16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
- let b0: int16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
+ let a0: int16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
+ let b0: int16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
@@ -28795,8 +28795,8 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
- let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
- let b0: uint8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
+ let a0: uint8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
+ let b0: uint8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
@@ -28810,8 +28810,8 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
- let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
- let b0: uint16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
+ let a0: uint16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
+ let b0: uint16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
@@ -28825,8 +28825,8 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
- let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
- let b0: poly8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
+ let a0: poly8x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
+ let b0: poly8x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
@@ -28840,8 +28840,8 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
- let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
- let b0: poly16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
+ let a0: poly16x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
+ let b0: poly16x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
@@ -28855,8 +28855,8 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
- let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a0, b0))
}
@@ -28870,8 +28870,8 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
- let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a0, b0))
}
@@ -28885,8 +28885,8 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
- let a0: int8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
- let b0: int8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
+ let a0: int8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
+ let b0: int8x16_t = simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
transmute((a0, b0))
}
@@ -28900,8 +28900,8 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
- let a0: int16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
- let b0: int16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
+ let a0: int16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
+ let b0: int16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
@@ -28915,8 +28915,8 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
- let a0: int32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
- let b0: int32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
+ let a0: int32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
+ let b0: int32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
@@ -28930,8 +28930,8 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
- let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
- let b0: uint8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
+ let a0: uint8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
+ let b0: uint8x16_t = simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
transmute((a0, b0))
}
@@ -28945,8 +28945,8 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
- let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
- let b0: uint16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
+ let a0: uint16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
+ let b0: uint16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
@@ -28960,8 +28960,8 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
- let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
- let b0: uint32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
+ let a0: uint32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
+ let b0: uint32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
@@ -28975,8 +28975,8 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
- let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
- let b0: poly8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
+ let a0: poly8x16_t = simd_shuffle!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
+ let b0: poly8x16_t = simd_shuffle!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]);
transmute((a0, b0))
}
@@ -28990,8 +28990,8 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
- let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
- let b0: poly16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
+ let a0: poly16x8_t = simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]);
+ let b0: poly16x8_t = simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]);
transmute((a0, b0))
}
@@ -29005,8 +29005,8 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
- let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a0, b0))
}
@@ -29020,8 +29020,8 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
- let a0: float32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]);
- let b0: float32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]);
+ let a0: float32x4_t = simd_shuffle!(a, b, [0, 4, 1, 5]);
+ let b0: float32x4_t = simd_shuffle!(a, b, [2, 6, 3, 7]);
transmute((a0, b0))
}
@@ -29035,8 +29035,8 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
- let a0: int8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
- let b0: int8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+ let a0: int8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+ let b0: int8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
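
The unzip masks are the inverse operation: even-indexed lanes of the concatenation (a, b) go to the first result and odd-indexed lanes to the second. A scalar sketch:

fn uzp_demo() {
    let ab = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    let evens: Vec<i32> = ab.iter().copied().step_by(2).collect();
    let odds: Vec<i32> = ab.iter().copied().skip(1).step_by(2).collect();
    assert_eq!(evens, [0, 2, 4, 6, 8, 10, 12, 14]);
    assert_eq!(odds, [1, 3, 5, 7, 9, 11, 13, 15]);
}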
@@ -29050,8 +29050,8 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
- let a0: int16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
- let b0: int16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
+ let a0: int16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+ let b0: int16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
@@ -29065,8 +29065,8 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
- let a0: int8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
- let b0: int8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
+ let a0: int8x16_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
+ let b0: int8x16_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
transmute((a0, b0))
}
@@ -29080,8 +29080,8 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
- let a0: int16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
- let b0: int16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+ let a0: int16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+ let b0: int16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
@@ -29095,8 +29095,8 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
- let a0: int32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
- let b0: int32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
+ let a0: int32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+ let b0: int32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
@@ -29110,8 +29110,8 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
- let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
- let b0: uint8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+ let a0: uint8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+ let b0: uint8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
@@ -29125,8 +29125,8 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
- let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
- let b0: uint16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
+ let a0: uint16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+ let b0: uint16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
@@ -29140,8 +29140,8 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
- let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
- let b0: uint8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
+ let a0: uint8x16_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
+ let b0: uint8x16_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
transmute((a0, b0))
}
@@ -29155,8 +29155,8 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
- let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
- let b0: uint16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+ let a0: uint16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+ let b0: uint16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
@@ -29170,8 +29170,8 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
- let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
- let b0: uint32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
+ let a0: uint32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+ let b0: uint32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
@@ -29185,8 +29185,8 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
- let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
- let b0: poly8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+ let a0: poly8x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+ let b0: poly8x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
@@ -29200,8 +29200,8 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
- let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
- let b0: poly16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
+ let a0: poly16x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+ let b0: poly16x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
@@ -29215,8 +29215,8 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
- let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
- let b0: poly8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
+ let a0: poly8x16_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]);
+ let b0: poly8x16_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]);
transmute((a0, b0))
}
@@ -29230,8 +29230,8 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
- let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
- let b0: poly16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
+ let a0: poly16x8_t = simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]);
+ let b0: poly16x8_t = simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]);
transmute((a0, b0))
}
@@ -29245,8 +29245,8 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
- let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a0: int32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b0: int32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a0, b0))
}
@@ -29260,8 +29260,8 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
- let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a0: uint32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b0: uint32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a0, b0))
}
@@ -29275,8 +29275,8 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
- let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]);
- let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]);
+ let a0: float32x2_t = simd_shuffle!(a, b, [0, 2]);
+ let b0: float32x2_t = simd_shuffle!(a, b, [1, 3]);
transmute((a0, b0))
}
@@ -29290,8 +29290,8 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t {
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))]
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t {
- let a0: float32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]);
- let b0: float32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]);
+ let a0: float32x4_t = simd_shuffle!(a, b, [0, 2, 4, 6]);
+ let b0: float32x4_t = simd_shuffle!(a, b, [1, 3, 5, 7]);
transmute((a0, b0))
}
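
The `vzip*`/`vuzp*` pairs above are inverse lane permutations: zip interleaves two vectors, uzp separates even and odd lanes again. A plain-Rust sketch of that relationship, using ordinary arrays rather than the NEON vector types:

fn main() {
    let a = [1.0f32, 2.0];
    let b = [10.0f32, 20.0];

    // vzip_f32-style interleave: [a0, b0, a1, b1].
    let zipped = [a[0], b[0], a[1], b[1]];

    // vuzp-style deinterleave recovers the original vectors.
    let evens: Vec<f32> = zipped.iter().step_by(2).copied().collect();
    let odds: Vec<f32> = zipped.iter().skip(1).step_by(2).copied().collect();
    assert_eq!(evens, a);
    assert_eq!(odds, b);
}
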
@@ -29382,7 +29382,7 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
simd_add(a, simd_cast(e))
}
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)
#[inline]
@@ -29401,7 +29401,7 @@ pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t {
vqabs_s8_(a)
}
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)
#[inline]
@@ -29420,7 +29420,7 @@ pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t {
vqabsq_s8_(a)
}
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)
#[inline]
@@ -29439,7 +29439,7 @@ pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t {
vqabs_s16_(a)
}
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)
#[inline]
@@ -29458,7 +29458,7 @@ pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t {
vqabsq_s16_(a)
}
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)
#[inline]
@@ -29477,7 +29477,7 @@ pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t {
vqabs_s32_(a)
}
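
What "saturating absolute value" means for the `vqabs*` family above: the one overflowing input (`i8::MIN`, whose negation does not fit in `i8`) clamps to the maximum instead of wrapping or panicking. A scalar sketch:

// Per-lane behaviour of vqabs_s8, expressed on a single i8.
fn qabs_i8(x: i8) -> i8 {
    // checked_abs is None only for i8::MIN; saturate that case.
    x.checked_abs().unwrap_or(i8::MAX)
}

fn main() {
    assert_eq!(qabs_i8(-5), 5);
    assert_eq!(qabs_i8(i8::MIN), i8::MAX); // -128 saturates to 127
}
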
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
///
/// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)
#[inline]
@@ -31448,7 +31448,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vcreate_p64() {
let a: u64 = 1;
let e: i64x1 = i64x1::new(1);
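
The `p64`/`p128` tests in these hunks gain `enable = "neon,aes"` because the 64-bit polynomial types belong to the crypto extension, which is gated on the `aes` target feature. A hedged sketch of how calling code would typically probe for that feature at runtime before touching `p64` intrinsics, when it is not enabled at compile time:

fn main() {
    #[cfg(target_arch = "aarch64")]
    {
        // Runtime detection from std; compile-time gating would use
        // #[cfg(target_feature = "aes")] instead.
        if std::arch::is_aarch64_feature_detected!("aes") {
            println!("aes available: p64/p128 intrinsics usable here");
        }
    }
}
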
@@ -33836,7 +33836,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vrndn_f32() {
let a: f32x2 = f32x2::new(-1.5, 0.5);
let e: f32x2 = f32x2::new(-2.0, 0.0);
@@ -33844,7 +33845,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vrndnq_f32() {
let a: f32x4 = f32x4::new(-1.5, 0.5, 1.5, 2.5);
let e: f32x4 = f32x4::new(-2.0, 0.0, 2.0, 2.0);
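
On 32-bit ARM the `vrndn`, `vfma`/`vfms`, and `vmaxnm`/`vminnm` families need FP extensions (`fp-armv8,v8` and `vfp4` respectively) that baseline AArch64 NEON already includes, hence the split `cfg_attr` gating above (the `crc` feature is unrelated to these float tests). The test values also pin down the rounding mode: round to nearest, ties to even. A scalar sketch of that tie-breaking rule (plain `f32::round` rounds ties away from zero, so the tie case is corrected explicitly):

fn rndn(x: f32) -> f32 {
    let r = x.round();
    // On an exact .5 tie, round() went away from zero; step back to
    // the even neighbour instead.
    if (x - x.trunc()).abs() == 0.5 && r % 2.0 != 0.0 {
        r - (r - x).signum()
    } else {
        r
    }
}

fn main() {
    assert_eq!(rndn(-1.5), -2.0); // matches test_vrndn_f32
    assert_eq!(rndn(0.5), 0.0);
    assert_eq!(rndn(2.5), 2.0);   // ties go to the even value
}
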
@@ -38406,7 +38408,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfma_f32() {
let a: f32x2 = f32x2::new(8.0, 18.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
@@ -38416,7 +38419,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfmaq_f32() {
let a: f32x4 = f32x4::new(8.0, 18.0, 12.0, 10.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
@@ -38426,7 +38430,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfma_n_f32() {
let a: f32x2 = f32x2::new(2.0, 3.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
@@ -38436,7 +38441,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfmaq_n_f32() {
let a: f32x4 = f32x4::new(2.0, 3.0, 4.0, 5.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
@@ -38446,7 +38452,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfms_f32() {
let a: f32x2 = f32x2::new(20.0, 30.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
@@ -38456,7 +38463,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfmsq_f32() {
let a: f32x4 = f32x4::new(20.0, 30.0, 40.0, 50.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
@@ -38466,7 +38474,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfms_n_f32() {
let a: f32x2 = f32x2::new(50.0, 35.0);
let b: f32x2 = f32x2::new(6.0, 4.0);
@@ -38476,7 +38485,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vfmsq_n_f32() {
let a: f32x4 = f32x4::new(50.0, 35.0, 60.0, 69.0);
let b: f32x4 = f32x4::new(6.0, 4.0, 7.0, 8.0);
@@ -39167,7 +39177,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vmaxnm_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(8.0, 16.0);
@@ -39176,7 +39187,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vmaxnmq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
@@ -39311,7 +39323,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vminnm_f32() {
let a: f32x2 = f32x2::new(1.0, 2.0);
let b: f32x2 = f32x2::new(8.0, 16.0);
@@ -39320,7 +39333,8 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,crc"))]
+ #[cfg_attr(target_arch = "aarch64", simd_test(enable = "neon"))]
unsafe fn test_vminnmq_f32() {
let a: f32x4 = f32x4::new(1.0, 2.0, 3.0, -4.0);
let b: f32x4 = f32x4::new(8.0, 16.0, -1.0, 6.0);
@@ -41120,7 +41134,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_s32_p64() {
let a: i64x1 = i64x1::new(0);
let e: i32x2 = i32x2::new(0, 0);
@@ -41128,7 +41142,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_u32_p64() {
let a: i64x1 = i64x1::new(0);
let e: u32x2 = u32x2::new(0, 0);
@@ -41136,7 +41150,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s32_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i32x4 = i32x4::new(0, 0, 1, 0);
@@ -41144,7 +41158,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u32_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u32x4 = u32x4::new(0, 0, 1, 0);
@@ -41152,7 +41166,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s64_p128() {
let a: p128 = 0;
let e: i64x2 = i64x2::new(0, 0);
@@ -41160,7 +41174,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u64_p128() {
let a: p128 = 0;
let e: u64x2 = u64x2::new(0, 0);
@@ -41168,7 +41182,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_p128() {
let a: p128 = 0;
let e: i64x2 = i64x2::new(0, 0);
@@ -41480,7 +41494,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_s32() {
let a: i32x2 = i32x2::new(0, 0);
let e: i64x1 = i64x1::new(0);
@@ -41488,7 +41502,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_u32() {
let a: u32x2 = u32x2::new(0, 0);
let e: i64x1 = i64x1::new(0);
@@ -41496,7 +41510,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_s32() {
let a: i32x4 = i32x4::new(0, 0, 1, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -41504,7 +41518,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_u32() {
let a: u32x4 = u32x4::new(0, 0, 1, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -41512,7 +41526,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_s64() {
let a: i64x2 = i64x2::new(0, 0);
let e: p128 = 0;
@@ -41520,7 +41534,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_u64() {
let a: u64x2 = u64x2::new(0, 0);
let e: p128 = 0;
@@ -41528,7 +41542,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_p64() {
let a: i64x2 = i64x2::new(0, 0);
let e: p128 = 0;
@@ -41728,7 +41742,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_s16_p64() {
let a: i64x1 = i64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
@@ -41736,7 +41750,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_u16_p64() {
let a: i64x1 = i64x1::new(0);
let e: u16x4 = u16x4::new(0, 0, 0, 0);
@@ -41744,7 +41758,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p16_p64() {
let a: i64x1 = i64x1::new(0);
let e: i16x4 = i16x4::new(0, 0, 0, 0);
@@ -41752,7 +41766,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s16_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
@@ -41760,7 +41774,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u16_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
@@ -41768,7 +41782,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p16_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
@@ -41776,7 +41790,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s32_p128() {
let a: p128 = 0;
let e: i32x4 = i32x4::new(0, 0, 0, 0);
@@ -41784,7 +41798,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u32_p128() {
let a: p128 = 0;
let e: u32x4 = u32x4::new(0, 0, 0, 0);
@@ -41984,7 +41998,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_p16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
@@ -41992,7 +42006,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_s16() {
let a: i16x4 = i16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
@@ -42000,7 +42014,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_u16() {
let a: u16x4 = u16x4::new(0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
@@ -42008,7 +42022,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -42016,7 +42030,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -42024,7 +42038,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 1, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -42032,7 +42046,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_s32() {
let a: i32x4 = i32x4::new(0, 0, 0, 0);
let e: p128 = 0;
@@ -42040,7 +42054,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_u32() {
let a: u32x4 = u32x4::new(0, 0, 0, 0);
let e: p128 = 0;
@@ -42144,7 +42158,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_s8_p64() {
let a: i64x1 = i64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -42152,7 +42166,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_u8_p64() {
let a: i64x1 = i64x1::new(0);
let e: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -42160,7 +42174,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p8_p64() {
let a: i64x1 = i64x1::new(0);
let e: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -42168,7 +42182,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s8_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
@@ -42176,7 +42190,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u8_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
@@ -42184,7 +42198,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p8_p64() {
let a: i64x2 = i64x2::new(0, 1);
let e: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
@@ -42192,7 +42206,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s16_p128() {
let a: p128 = 0;
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -42200,7 +42214,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u16_p128() {
let a: p128 = 0;
let e: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -42208,7 +42222,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p16_p128() {
let a: p128 = 0;
let e: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
@@ -42312,7 +42326,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_p8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
@@ -42320,7 +42334,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_s8() {
let a: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
@@ -42328,7 +42342,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpret_p64_u8() {
let a: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: i64x1 = i64x1::new(0);
@@ -42336,7 +42350,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_p8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -42344,7 +42358,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_s8() {
let a: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -42352,7 +42366,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p64_u8() {
let a: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0);
let e: i64x2 = i64x2::new(0, 1);
@@ -42360,7 +42374,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_s16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 0;
@@ -42368,7 +42382,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_u16() {
let a: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 0;
@@ -42376,7 +42390,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_p16() {
let a: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 0;
@@ -42384,7 +42398,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_s8() {
let a: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 1;
@@ -42392,7 +42406,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_u8() {
let a: u8x16 = u8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 1;
@@ -42400,7 +42414,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p128_p8() {
let a: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
let e: p128 = 1;
@@ -42408,7 +42422,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_s8_p128() {
let a: p128 = 1;
let e: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
@@ -42416,7 +42430,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_u8_p128() {
let a: p128 = 1;
let e: u8x16 = u8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
@@ -42424,7 +42438,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vreinterpretq_p8_p128() {
let a: p128 = 1;
let e: i8x16 = i8x16::new(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
@@ -43376,7 +43390,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vset_lane_p64() {
let a: p64 = 1;
let b: i64x1 = i64x1::new(0);
@@ -43475,7 +43489,7 @@ mod test {
assert_eq!(r, e);
}
- #[simd_test(enable = "neon")]
+ #[simd_test(enable = "neon,aes")]
unsafe fn test_vsetq_lane_p64() {
let a: p64 = 1;
let b: i64x2 = i64x2::new(0, 2);
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
index 31e924b84..923265966 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs
@@ -975,7 +975,7 @@ extern "unadjusted" {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> int8x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
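
`static_assert_uimm_bits!(LANE, 3)` replaces the older `static_assert_imm3!` spelling: both reject, at compile time, any `LANE` that does not fit in the stated number of unsigned bits. A plain-Rust sketch of the same idea using an associated const on a const-generic type (the real macro is internal to stdarch):

struct Assert<const LANE: i32>;
impl<const LANE: i32> Assert<LANE> {
    // Evaluating this const fails compilation when LANE needs more
    // than 3 unsigned bits, mirroring static_assert_uimm_bits!(LANE, 3).
    const OK: () = assert!(LANE >= 0 && LANE < (1 << 3));
}

fn lane_checked<const LANE: i32>() -> i32 {
    let _ = Assert::<LANE>::OK; // forces the compile-time check
    LANE
}

fn main() {
    assert_eq!(lane_checked::<7>(), 7);
    // lane_checked::<8>() would be a compile error.
}
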
@@ -991,7 +991,7 @@ pub unsafe fn vld1_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x8_t) -> in
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) -> int8x16_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1007,7 +1007,7 @@ pub unsafe fn vld1q_lane_s8<const LANE: i32>(ptr: *const i8, src: int8x16_t) ->
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) -> int16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1023,7 +1023,7 @@ pub unsafe fn vld1_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x4_t) ->
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -> int16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1039,7 +1039,7 @@ pub unsafe fn vld1q_lane_s16<const LANE: i32>(ptr: *const i16, src: int16x8_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) -> int32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1055,7 +1055,7 @@ pub unsafe fn vld1_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x2_t) ->
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -> int32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1071,7 +1071,7 @@ pub unsafe fn vld1q_lane_s32<const LANE: i32>(ptr: *const i32, src: int32x4_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1087,7 +1087,7 @@ pub unsafe fn vld1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) ->
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1103,7 +1103,7 @@ pub unsafe fn vld1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> uint8x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1119,7 +1119,7 @@ pub unsafe fn vld1_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x8_t) -> u
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) -> uint8x16_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1135,7 +1135,7 @@ pub unsafe fn vld1q_lane_u8<const LANE: i32>(ptr: *const u8, src: uint8x16_t) ->
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -> uint16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1151,7 +1151,7 @@ pub unsafe fn vld1_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x4_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t) -> uint16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1167,7 +1167,7 @@ pub unsafe fn vld1q_lane_u16<const LANE: i32>(ptr: *const u16, src: uint16x8_t)
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -> uint32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1183,7 +1183,7 @@ pub unsafe fn vld1_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x2_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t) -> uint32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1199,7 +1199,7 @@ pub unsafe fn vld1q_lane_u32<const LANE: i32>(ptr: *const u32, src: uint32x4_t)
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1215,7 +1215,7 @@ pub unsafe fn vld1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1231,7 +1231,7 @@ pub unsafe fn vld1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t)
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> poly8x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1247,7 +1247,7 @@ pub unsafe fn vld1_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x8_t) -> p
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) -> poly8x16_t {
- static_assert_imm4!(LANE);
+ static_assert_uimm_bits!(LANE, 4);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1263,7 +1263,7 @@ pub unsafe fn vld1q_lane_p8<const LANE: i32>(ptr: *const p8, src: poly8x16_t) ->
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -> poly16x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1279,7 +1279,7 @@ pub unsafe fn vld1_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x4_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t) -> poly16x8_t {
- static_assert_imm3!(LANE);
+ static_assert_uimm_bits!(LANE, 3);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1297,7 +1297,7 @@ pub unsafe fn vld1q_lane_p16<const LANE: i32>(ptr: *const p16, src: poly16x8_t)
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
- static_assert!(LANE : i32 where LANE == 0);
+ static_assert!(LANE == 0);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1315,7 +1315,7 @@ pub unsafe fn vld1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1331,7 +1331,7 @@ pub unsafe fn vld1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t)
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t) -> float32x2_t {
- static_assert_imm1!(LANE);
+ static_assert_uimm_bits!(LANE, 1);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1347,7 +1347,7 @@ pub unsafe fn vld1_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x2_t)
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t) -> float32x4_t {
- static_assert_imm2!(LANE);
+ static_assert_uimm_bits!(LANE, 2);
simd_insert(src, LANE as u32, *ptr)
}
@@ -1363,7 +1363,7 @@ pub unsafe fn vld1q_lane_f32<const LANE: i32>(ptr: *const f32, src: float32x4_t)
)]
pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
let x = vld1_lane_s8::<0>(ptr, transmute(i8x8::splat(0)));
- simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
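
The `vld1*_dup*` bodies in this stretch are all the same two-step pattern: load one element into lane 0, then splat it across the vector with an all-zeros shuffle mask. A scalar sketch of the net effect:

// Plain-Rust picture of vld1_dup_s8: read one element through the
// pointer and replicate it into every lane.
unsafe fn ld1_dup_s8(ptr: *const i8) -> [i8; 8] {
    [*ptr; 8]
}

fn main() {
    let x: i8 = 42;
    let v = unsafe { ld1_dup_s8(&x) };
    assert_eq!(v, [42i8; 8]);
}
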
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1378,7 +1378,7 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t {
)]
pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
let x = vld1q_lane_s8::<0>(ptr, transmute(i8x16::splat(0)));
- simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1393,7 +1393,7 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t {
)]
pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
let x = vld1_lane_s16::<0>(ptr, transmute(i16x4::splat(0)));
- simd_shuffle4!(x, x, [0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1408,7 +1408,7 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t {
)]
pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
let x = vld1q_lane_s16::<0>(ptr, transmute(i16x8::splat(0)));
- simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1423,7 +1423,7 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t {
)]
pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
let x = vld1_lane_s32::<0>(ptr, transmute(i32x2::splat(0)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1438,7 +1438,7 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t {
)]
pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t {
let x = vld1q_lane_s32::<0>(ptr, transmute(i32x4::splat(0)));
- simd_shuffle4!(x, x, [0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1474,7 +1474,7 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t {
)]
pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
let x = vld1q_lane_s64::<0>(ptr, transmute(i64x2::splat(0)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1489,7 +1489,7 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t {
)]
pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
let x = vld1_lane_u8::<0>(ptr, transmute(u8x8::splat(0)));
- simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1504,7 +1504,7 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t {
)]
pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
let x = vld1q_lane_u8::<0>(ptr, transmute(u8x16::splat(0)));
- simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1519,7 +1519,7 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t {
)]
pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
let x = vld1_lane_u16::<0>(ptr, transmute(u16x4::splat(0)));
- simd_shuffle4!(x, x, [0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1534,7 +1534,7 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t {
)]
pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
let x = vld1q_lane_u16::<0>(ptr, transmute(u16x8::splat(0)));
- simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1549,7 +1549,7 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t {
)]
pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
let x = vld1_lane_u32::<0>(ptr, transmute(u32x2::splat(0)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1564,7 +1564,7 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t {
)]
pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t {
let x = vld1q_lane_u32::<0>(ptr, transmute(u32x4::splat(0)));
- simd_shuffle4!(x, x, [0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1600,7 +1600,7 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t {
)]
pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
let x = vld1q_lane_u64::<0>(ptr, transmute(u64x2::splat(0)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1615,7 +1615,7 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t {
)]
pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
let x = vld1_lane_p8::<0>(ptr, transmute(u8x8::splat(0)));
- simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1630,7 +1630,7 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t {
)]
pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
let x = vld1q_lane_p8::<0>(ptr, transmute(u8x16::splat(0)));
- simd_shuffle16!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1645,7 +1645,7 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t {
)]
pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
let x = vld1_lane_p16::<0>(ptr, transmute(u16x4::splat(0)));
- simd_shuffle4!(x, x, [0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1660,7 +1660,7 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t {
)]
pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
let x = vld1q_lane_p16::<0>(ptr, transmute(u16x8::splat(0)));
- simd_shuffle8!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0, 0, 0, 0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1675,7 +1675,7 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t {
)]
pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t {
let x = vld1_lane_f32::<0>(ptr, transmute(f32x2::splat(0.)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1715,7 +1715,7 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t {
)]
pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t {
let x = vld1q_lane_p64::<0>(ptr, transmute(u64x2::splat(0)));
- simd_shuffle2!(x, x, [0, 0])
+ simd_shuffle!(x, x, [0, 0])
}
/// Load one single-element structure and Replicate to all lanes (of one register).
@@ -1730,7 +1730,7 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t {
)]
pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t {
let x = vld1q_lane_f32::<0>(ptr, transmute(f32x4::splat(0.)));
- simd_shuffle4!(x, x, [0, 0, 0, 0])
+ simd_shuffle!(x, x, [0, 0, 0, 0])
}
// signed absolute difference and accumulate (64-bit)
@@ -2369,8 +2369,8 @@ pub unsafe fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
- let a: int8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let a: int16x8_t = simd_cast(a);
let b: int16x8_t = simd_cast(b);
simd_add(a, b)
@@ -2387,8 +2387,8 @@ pub unsafe fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
- let a: int16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let a: int32x4_t = simd_cast(a);
let b: int32x4_t = simd_cast(b);
simd_add(a, b)
@@ -2405,8 +2405,8 @@ pub unsafe fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
- let a: int32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
let a: int64x2_t = simd_cast(a);
let b: int64x2_t = simd_cast(b);
simd_add(a, b)
@@ -2423,8 +2423,8 @@ pub unsafe fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
- let a: uint8x8_t = simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
- let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let a: uint16x8_t = simd_cast(a);
let b: uint16x8_t = simd_cast(b);
simd_add(a, b)
@@ -2441,8 +2441,8 @@ pub unsafe fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
- let a: uint16x4_t = simd_shuffle4!(a, a, [4, 5, 6, 7]);
- let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
+ let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let a: uint32x4_t = simd_cast(a);
let b: uint32x4_t = simd_cast(b);
simd_add(a, b)
@@ -2459,8 +2459,8 @@ pub unsafe fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
- let a: uint32x2_t = simd_shuffle2!(a, a, [2, 3]);
- let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
+ let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
let a: uint64x2_t = simd_cast(a);
let b: uint64x2_t = simd_cast(b);
simd_add(a, b)
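
The `vaddl_high_*` bodies above share one shape: the shuffle selects the high half of each input (indices `[8..16)`, `[4..8)`, or `[2..4)`), the casts widen it, and the add happens at the wider width so it cannot overflow. A scalar sketch for the `s8` case:

fn addl_high_s8(a: [i8; 16], b: [i8; 16]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for i in 0..8 {
        // Take the high lane, widen to i16, then add.
        out[i] = a[8 + i] as i16 + b[8 + i] as i16;
    }
    out
}

fn main() {
    let mut a = [0i8; 16];
    let mut b = [0i8; 16];
    a[8] = 100;
    b[8] = 100;
    assert_eq!(addl_high_s8(a, b)[0], 200); // widened: no i8 overflow
}
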
@@ -2567,7 +2567,7 @@ pub unsafe fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
- let b: int8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let b: int16x8_t = simd_cast(b);
simd_add(a, b)
}
@@ -2583,7 +2583,7 @@ pub unsafe fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
- let b: int16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let b: int32x4_t = simd_cast(b);
simd_add(a, b)
}
@@ -2599,7 +2599,7 @@ pub unsafe fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
- let b: int32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
let b: int64x2_t = simd_cast(b);
simd_add(a, b)
}
@@ -2615,7 +2615,7 @@ pub unsafe fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
- let b: uint8x8_t = simd_shuffle8!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
+ let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
let b: uint16x8_t = simd_cast(b);
simd_add(a, b)
}
@@ -2631,7 +2631,7 @@ pub unsafe fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
- let b: uint16x4_t = simd_shuffle4!(b, b, [4, 5, 6, 7]);
+ let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
let b: uint32x4_t = simd_cast(b);
simd_add(a, b)
}
@@ -2647,7 +2647,7 @@ pub unsafe fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
- let b: uint32x2_t = simd_shuffle2!(b, b, [2, 3]);
+ let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
let b: uint64x2_t = simd_cast(b);
simd_add(a, b)
}
@@ -2748,7 +2748,7 @@ pub unsafe fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
)]
pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
let x = simd_cast(simd_shr(simd_add(a, b), int16x8_t(8, 8, 8, 8, 8, 8, 8, 8)));
- simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Add returning High Narrow (high half).
@@ -2763,7 +2763,7 @@ pub unsafe fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x1
)]
pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
let x = simd_cast(simd_shr(simd_add(a, b), int32x4_t(16, 16, 16, 16)));
- simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Add returning High Narrow (high half).
@@ -2778,7 +2778,7 @@ pub unsafe fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16
)]
pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
let x = simd_cast(simd_shr(simd_add(a, b), int64x2_t(32, 32)));
- simd_shuffle4!(r, x, [0, 1, 2, 3])
+ simd_shuffle!(r, x, [0, 1, 2, 3])
}
/// Add returning High Narrow (high half).
@@ -2793,7 +2793,7 @@ pub unsafe fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32
)]
pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
let x = simd_cast(simd_shr(simd_add(a, b), uint16x8_t(8, 8, 8, 8, 8, 8, 8, 8)));
- simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Add returning High Narrow (high half).
@@ -2808,7 +2808,7 @@ pub unsafe fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uin
)]
pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
let x = simd_cast(simd_shr(simd_add(a, b), uint32x4_t(16, 16, 16, 16)));
- simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Add returning High Narrow (high half).
@@ -2823,7 +2823,7 @@ pub unsafe fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> ui
)]
pub unsafe fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
let x = simd_cast(simd_shr(simd_add(a, b), uint64x2_t(32, 32)));
- simd_shuffle4!(r, x, [0, 1, 2, 3])
+ simd_shuffle!(r, x, [0, 1, 2, 3])
}
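
`vaddhn_high_*` goes the other way: add at full width, keep only the high half of each sum (the shift by 8/16/32 above), and append the narrowed lanes after the ones already held in `r`; the final identity-ordered shuffle just spells out that concatenation. A scalar sketch for the `s16` case:

fn addhn_high_s16(r: [i8; 8], a: [i16; 8], b: [i16; 8]) -> [i8; 16] {
    let mut out = [0i8; 16];
    out[..8].copy_from_slice(&r); // low half: lanes already narrowed
    for i in 0..8 {
        // High byte of each 16-bit sum becomes one narrowed lane.
        out[8 + i] = (a[i].wrapping_add(b[i]) >> 8) as i8;
    }
    out
}

fn main() {
    let r = [0i8; 8];
    let a = [0x1234i16; 8];
    let b = [0x0100i16; 8];
    // (0x1234 + 0x0100) >> 8 = 0x13
    assert_eq!(addhn_high_s16(r, a, b)[8], 0x13);
}
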
/// Rounding Add returning High Narrow.
@@ -2922,7 +2922,7 @@ pub unsafe fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t {
)]
pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t {
let x = vraddhn_s16_(a, b);
- simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Rounding Add returning High Narrow (high half).
@@ -2937,7 +2937,7 @@ pub unsafe fn vraddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x
)]
pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t {
let x = vraddhn_s32_(a, b);
- simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Rounding Add returning High Narrow (high half).
@@ -2952,7 +2952,7 @@ pub unsafe fn vraddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int1
)]
pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t {
let x = vraddhn_s64_(a, b);
- simd_shuffle4!(r, x, [0, 1, 2, 3])
+ simd_shuffle!(r, x, [0, 1, 2, 3])
}
/// Rounding Add returning High Narrow (high half).
@@ -2967,7 +2967,7 @@ pub unsafe fn vraddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int3
)]
pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t {
let x: uint8x8_t = transmute(vraddhn_s16_(transmute(a), transmute(b)));
- simd_shuffle16!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
}
/// Rounding Add returning High Narrow (high half).
@@ -2982,7 +2982,7 @@ pub unsafe fn vraddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> ui
)]
pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_t {
let x: uint16x4_t = transmute(vraddhn_s32_(transmute(a), transmute(b)));
- simd_shuffle8!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(r, x, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Rounding Add returning High Narrow (high half).
@@ -2997,7 +2997,7 @@ pub unsafe fn vraddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> u
)]
pub unsafe fn vraddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_t {
let x: uint32x2_t = transmute(vraddhn_s64_(transmute(a), transmute(b)));
- simd_shuffle4!(r, x, [0, 1, 2, 3])
+ simd_shuffle!(r, x, [0, 1, 2, 3])
}
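
The rounding variants differ only in adding half of the discarded range before the shift, and the unsigned ones reuse the signed helper through `transmute` since the bit-level result is identical. A per-lane scalar sketch:

    // Scalar model of one vraddhn_s32 lane: widen to i64 so the rounding
    // bias cannot overflow, add 2^15, then keep the high 16 bits.
    fn raddhn_s32_lane(a: i32, b: i32) -> i16 {
        ((a as i64 + b as i64 + (1 << 15)) >> 16) as i16
    }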
/// Signed Add Long Pairwise.
@@ -4655,7 +4655,7 @@ pub unsafe fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -4670,7 +4670,7 @@ pub unsafe fn vgetq_lane_u64<const IMM5: i32>(v: uint64x2_t) -> u64 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u64<const IMM5: i32>(v: uint64x1_t) -> u64 {
- static_assert!(IMM5 : i32 where IMM5 == 0);
+ static_assert!(IMM5 == 0);
simd_extract(v, 0)
}
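
A one-lane vector only has lane 0, so the range check degenerates to an equality. A sketch of what the new expansion amounts to, using the `inline_const` feature this patch enables in lib.rs (names hypothetical):

    // Model of vget_lane_u64: the const block is evaluated per
    // monomorphization, so any IMM5 other than 0 is a compile-time error.
    fn get_lane_u64_model<const IMM5: i32>(v: [u64; 1]) -> u64 {
        const { assert!(IMM5 == 0) };
        v[0]
    }
    // get_lane_u64_model::<0>([7]) compiles; get_lane_u64_model::<1>([7]) does not.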
@@ -4685,7 +4685,7 @@ pub unsafe fn vget_lane_u64<const IMM5: i32>(v: uint64x1_t) -> u64 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u16<const IMM5: i32>(v: uint16x4_t) -> u16 {
- static_assert_imm2!(IMM5);
+ static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
@@ -4700,7 +4700,7 @@ pub unsafe fn vget_lane_u16<const IMM5: i32>(v: uint16x4_t) -> u16 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s16<const IMM5: i32>(v: int16x4_t) -> i16 {
- static_assert_imm2!(IMM5);
+ static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
@@ -4715,7 +4715,7 @@ pub unsafe fn vget_lane_s16<const IMM5: i32>(v: int16x4_t) -> i16 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
- static_assert_imm2!(IMM5);
+ static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
@@ -4730,7 +4730,7 @@ pub unsafe fn vget_lane_p16<const IMM5: i32>(v: poly16x4_t) -> p16 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -4745,7 +4745,7 @@ pub unsafe fn vget_lane_u32<const IMM5: i32>(v: uint32x2_t) -> u32 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -4760,7 +4760,7 @@ pub unsafe fn vget_lane_s32<const IMM5: i32>(v: int32x2_t) -> i32 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -4775,7 +4775,7 @@ pub unsafe fn vget_lane_f32<const IMM5: i32>(v: float32x2_t) -> f32 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
- static_assert_imm2!(IMM5);
+ static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
@@ -4790,7 +4790,7 @@ pub unsafe fn vgetq_lane_f32<const IMM5: i32>(v: float32x4_t) -> f32 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
- static_assert!(IMM5 : i32 where IMM5 == 0);
+ static_assert!(IMM5 == 0);
simd_extract(v, IMM5 as u32)
}
@@ -4805,7 +4805,7 @@ pub unsafe fn vget_lane_p64<const IMM5: i32>(v: poly64x1_t) -> p64 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -4820,7 +4820,7 @@ pub unsafe fn vgetq_lane_p64<const IMM5: i32>(v: poly64x2_t) -> p64 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
- static_assert!(IMM5 : i32 where IMM5 == 0);
+ static_assert!(IMM5 == 0);
simd_extract(v, IMM5 as u32)
}
@@ -4835,7 +4835,7 @@ pub unsafe fn vget_lane_s64<const IMM5: i32>(v: int64x1_t) -> i64 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
- static_assert_imm1!(IMM5);
+ static_assert_uimm_bits!(IMM5, 1);
simd_extract(v, IMM5 as u32)
}
@@ -4850,7 +4850,7 @@ pub unsafe fn vgetq_lane_s64<const IMM5: i32>(v: int64x2_t) -> i64 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
- static_assert_imm3!(IMM5);
+ static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
@@ -4865,7 +4865,7 @@ pub unsafe fn vgetq_lane_u16<const IMM5: i32>(v: uint16x8_t) -> u16 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
- static_assert_imm2!(IMM5);
+ static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
@@ -4880,7 +4880,7 @@ pub unsafe fn vgetq_lane_u32<const IMM5: i32>(v: uint32x4_t) -> u32 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
- static_assert_imm3!(IMM5);
+ static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
@@ -4895,7 +4895,7 @@ pub unsafe fn vgetq_lane_s16<const IMM5: i32>(v: int16x8_t) -> i16 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
- static_assert_imm3!(IMM5);
+ static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
@@ -4910,7 +4910,7 @@ pub unsafe fn vgetq_lane_p16<const IMM5: i32>(v: poly16x8_t) -> p16 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
- static_assert_imm2!(IMM5);
+ static_assert_uimm_bits!(IMM5, 2);
simd_extract(v, IMM5 as u32)
}
@@ -4925,7 +4925,7 @@ pub unsafe fn vgetq_lane_s32<const IMM5: i32>(v: int32x4_t) -> i32 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
- static_assert_imm3!(IMM5);
+ static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
@@ -4940,7 +4940,7 @@ pub unsafe fn vget_lane_u8<const IMM5: i32>(v: uint8x8_t) -> u8 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
- static_assert_imm3!(IMM5);
+ static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
@@ -4955,7 +4955,7 @@ pub unsafe fn vget_lane_s8<const IMM5: i32>(v: int8x8_t) -> i8 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
- static_assert_imm3!(IMM5);
+ static_assert_uimm_bits!(IMM5, 3);
simd_extract(v, IMM5 as u32)
}
@@ -4970,7 +4970,7 @@ pub unsafe fn vget_lane_p8<const IMM5: i32>(v: poly8x8_t) -> p8 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
- static_assert_imm4!(IMM5);
+ static_assert_uimm_bits!(IMM5, 4);
simd_extract(v, IMM5 as u32)
}
@@ -4985,7 +4985,7 @@ pub unsafe fn vgetq_lane_u8<const IMM5: i32>(v: uint8x16_t) -> u8 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
- static_assert_imm4!(IMM5);
+ static_assert_uimm_bits!(IMM5, 4);
simd_extract(v, IMM5 as u32)
}
@@ -5000,7 +5000,7 @@ pub unsafe fn vgetq_lane_s8<const IMM5: i32>(v: int8x16_t) -> i8 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
- static_assert_imm4!(IMM5);
+ static_assert_uimm_bits!(IMM5, 4);
simd_extract(v, IMM5 as u32)
}
@@ -5015,7 +5015,7 @@ pub unsafe fn vgetq_lane_p8<const IMM5: i32>(v: poly8x16_t) -> p8 {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
- simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
}
/// Duplicate vector element to vector or scalar
@@ -5029,7 +5029,7 @@ pub unsafe fn vget_high_s8(a: int8x16_t) -> int8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
- simd_shuffle4!(a, a, [4, 5, 6, 7])
+ simd_shuffle!(a, a, [4, 5, 6, 7])
}
/// Duplicate vector element to vector or scalar
@@ -5043,7 +5043,7 @@ pub unsafe fn vget_high_s16(a: int16x8_t) -> int16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_s32(a: int32x4_t) -> int32x2_t {
- simd_shuffle2!(a, a, [2, 3])
+ simd_shuffle!(a, a, [2, 3])
}
/// Duplicate vector element to vector or scalar
@@ -5071,7 +5071,7 @@ pub unsafe fn vget_high_s64(a: int64x2_t) -> int64x1_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
- simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
}
/// Duplicate vector element to vector or scalar
@@ -5085,7 +5085,7 @@ pub unsafe fn vget_high_u8(a: uint8x16_t) -> uint8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
- simd_shuffle4!(a, a, [4, 5, 6, 7])
+ simd_shuffle!(a, a, [4, 5, 6, 7])
}
/// Duplicate vector element to vector or scalar
@@ -5099,7 +5099,7 @@ pub unsafe fn vget_high_u16(a: uint16x8_t) -> uint16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_u32(a: uint32x4_t) -> uint32x2_t {
- simd_shuffle2!(a, a, [2, 3])
+ simd_shuffle!(a, a, [2, 3])
}
/// Duplicate vector element to vector or scalar
@@ -5127,7 +5127,7 @@ pub unsafe fn vget_high_u64(a: uint64x2_t) -> uint64x1_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
- simd_shuffle8!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
+ simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15])
}
/// Duplicate vector element to vector or scalar
@@ -5141,7 +5141,7 @@ pub unsafe fn vget_high_p8(a: poly8x16_t) -> poly8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
- simd_shuffle4!(a, a, [4, 5, 6, 7])
+ simd_shuffle!(a, a, [4, 5, 6, 7])
}
/// Duplicate vector element to vector or scalar
@@ -5155,7 +5155,7 @@ pub unsafe fn vget_high_p16(a: poly16x8_t) -> poly16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t {
- simd_shuffle2!(a, a, [2, 3])
+ simd_shuffle!(a, a, [2, 3])
}
/// Duplicate vector element to vector or scalar
@@ -5168,7 +5168,7 @@ pub unsafe fn vget_high_f32(a: float32x4_t) -> float32x2_t {
stable(feature = "vget_low_s8", since = "1.60.0")
)]
pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Duplicate vector element to vector or scalar
@@ -5181,7 +5181,7 @@ pub unsafe fn vget_low_s8(a: int8x16_t) -> int8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
/// Duplicate vector element to vector or scalar
@@ -5194,7 +5194,7 @@ pub unsafe fn vget_low_s16(a: int16x8_t) -> int16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_s32(a: int32x4_t) -> int32x2_t {
- simd_shuffle2!(a, a, [0, 1])
+ simd_shuffle!(a, a, [0, 1])
}
/// Duplicate vector element to vector or scalar
@@ -5220,7 +5220,7 @@ pub unsafe fn vget_low_s64(a: int64x2_t) -> int64x1_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Duplicate vector element to vector or scalar
@@ -5233,7 +5233,7 @@ pub unsafe fn vget_low_u8(a: uint8x16_t) -> uint8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
/// Duplicate vector element to vector or scalar
@@ -5246,7 +5246,7 @@ pub unsafe fn vget_low_u16(a: uint16x8_t) -> uint16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_u32(a: uint32x4_t) -> uint32x2_t {
- simd_shuffle2!(a, a, [0, 1])
+ simd_shuffle!(a, a, [0, 1])
}
/// Duplicate vector element to vector or scalar
@@ -5272,7 +5272,7 @@ pub unsafe fn vget_low_u64(a: uint64x2_t) -> uint64x1_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Duplicate vector element to vector or scalar
@@ -5285,7 +5285,7 @@ pub unsafe fn vget_low_p8(a: poly8x16_t) -> poly8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
/// Duplicate vector element to vector or scalar
@@ -5298,7 +5298,7 @@ pub unsafe fn vget_low_p16(a: poly16x8_t) -> poly16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vget_low_f32(a: float32x4_t) -> float32x2_t {
- simd_shuffle2!(a, a, [0, 1])
+ simd_shuffle!(a, a, [0, 1])
}
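
All of the vget_high_*/vget_low_* functions above are one idiom: select the upper or lower half of the lanes with a fixed index list. A scalar sketch:

    // vget_high_u8 keeps lanes 8..16, vget_low_u8 keeps lanes 0..8.
    fn get_high_u8_model(a: [u8; 16]) -> [u8; 8] {
        let mut hi = [0u8; 8];
        hi.copy_from_slice(&a[8..]);
        hi
    }
    fn get_low_u8_model(a: [u8; 16]) -> [u8; 8] {
        let mut lo = [0u8; 8];
        lo.copy_from_slice(&a[..8]);
        lo
    }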
/// Duplicate vector element to vector or scalar
@@ -6000,7 +6000,7 @@ pub unsafe fn vmovq_n_f32(value: f32) -> float32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -6016,7 +6016,7 @@ pub unsafe fn vext_s64<const N: i32>(a: int64x1_t, _b: int64x1_t) -> int64x1_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vext_u64<const N: i32>(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_t {
- static_assert!(N : i32 where N == 0);
+ static_assert!(N == 0);
a
}
@@ -6110,7 +6110,7 @@ pub unsafe fn vcntq_p8(a: poly8x16_t) -> poly8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
/// Reversing vector elements (swap endianness)
@@ -6124,7 +6124,7 @@ pub unsafe fn vrev16_s8(a: int8x8_t) -> int8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
}
/// Reversing vector elements (swap endianness)
@@ -6138,7 +6138,7 @@ pub unsafe fn vrev16q_s8(a: int8x16_t) -> int8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
/// Reversing vector elements (swap endianness)
@@ -6152,7 +6152,7 @@ pub unsafe fn vrev16_u8(a: uint8x8_t) -> uint8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
}
/// Reversing vector elements (swap endianness)
@@ -6166,7 +6166,7 @@ pub unsafe fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
/// Reversing vector elements (swap endianness)
@@ -6180,7 +6180,7 @@ pub unsafe fn vrev16_p8(a: poly8x8_t) -> poly8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14])
}
/// Reversing vector elements (swap endianness)
@@ -6194,7 +6194,7 @@ pub unsafe fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
/// Reversing vector elements (swap endianness)
@@ -6208,7 +6208,7 @@ pub unsafe fn vrev32_s8(a: int8x8_t) -> int8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
}
/// Reversing vector elements (swap endianness)
@@ -6222,7 +6222,7 @@ pub unsafe fn vrev32q_s8(a: int8x16_t) -> int8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
/// Reversing vector elements (swap endianness)
@@ -6236,7 +6236,7 @@ pub unsafe fn vrev32_u8(a: uint8x8_t) -> uint8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
}
/// Reversing vector elements (swap endianness)
@@ -6250,7 +6250,7 @@ pub unsafe fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, a, [1, 0, 3, 2])
+ simd_shuffle!(a, a, [1, 0, 3, 2])
}
/// Reversing vector elements (swap endianness)
@@ -6264,7 +6264,7 @@ pub unsafe fn vrev32_s16(a: int16x4_t) -> int16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
/// Reversing vector elements (swap endianness)
@@ -6278,7 +6278,7 @@ pub unsafe fn vrev32q_s16(a: int16x8_t) -> int16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, a, [1, 0, 3, 2])
+ simd_shuffle!(a, a, [1, 0, 3, 2])
}
/// Reversing vector elements (swap endianness)
@@ -6292,7 +6292,7 @@ pub unsafe fn vrev32_p16(a: poly16x4_t) -> poly16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
/// Reversing vector elements (swap endianness)
@@ -6306,7 +6306,7 @@ pub unsafe fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, a, [1, 0, 3, 2])
+ simd_shuffle!(a, a, [1, 0, 3, 2])
}
/// Reversing vector elements (swap endianness)
@@ -6320,7 +6320,7 @@ pub unsafe fn vrev32_u16(a: uint16x4_t) -> uint16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
+ simd_shuffle!(a, a, [1, 0, 3, 2, 5, 4, 7, 6])
}
/// Reversing vector elements (swap endianness)
@@ -6334,7 +6334,7 @@ pub unsafe fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
/// Reversing vector elements (swap endianness)
@@ -6348,7 +6348,7 @@ pub unsafe fn vrev32_p8(a: poly8x8_t) -> poly8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12])
}
/// Reversing vector elements (swap endianness)
@@ -6362,7 +6362,7 @@ pub unsafe fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
- simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
+ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6376,7 +6376,7 @@ pub unsafe fn vrev64_s8(a: int8x8_t) -> int8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
- simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
+ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
}
/// Reversing vector elements (swap endianness)
@@ -6390,7 +6390,7 @@ pub unsafe fn vrev64q_s8(a: int8x16_t) -> int8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
- simd_shuffle4!(a, a, [3, 2, 1, 0])
+ simd_shuffle!(a, a, [3, 2, 1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6404,7 +6404,7 @@ pub unsafe fn vrev64_s16(a: int16x4_t) -> int16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
- simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
/// Reversing vector elements (swap endianness)
@@ -6418,7 +6418,7 @@ pub unsafe fn vrev64q_s16(a: int16x8_t) -> int16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
- simd_shuffle2!(a, a, [1, 0])
+ simd_shuffle!(a, a, [1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6432,7 +6432,7 @@ pub unsafe fn vrev64_s32(a: int32x2_t) -> int32x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
- simd_shuffle4!(a, a, [1, 0, 3, 2])
+ simd_shuffle!(a, a, [1, 0, 3, 2])
}
/// Reversing vector elements (swap endianness)
@@ -6446,7 +6446,7 @@ pub unsafe fn vrev64q_s32(a: int32x4_t) -> int32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
- simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
+ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6460,7 +6460,7 @@ pub unsafe fn vrev64_u8(a: uint8x8_t) -> uint8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t {
- simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
+ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
}
/// Reversing vector elements (swap endianness)
@@ -6474,7 +6474,7 @@ pub unsafe fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t {
- simd_shuffle4!(a, a, [3, 2, 1, 0])
+ simd_shuffle!(a, a, [3, 2, 1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6488,7 +6488,7 @@ pub unsafe fn vrev64_u16(a: uint16x4_t) -> uint16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t {
- simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
/// Reversing vector elements (swap endianness)
@@ -6502,7 +6502,7 @@ pub unsafe fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t {
- simd_shuffle2!(a, a, [1, 0])
+ simd_shuffle!(a, a, [1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6516,7 +6516,7 @@ pub unsafe fn vrev64_u32(a: uint32x2_t) -> uint32x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t {
- simd_shuffle4!(a, a, [1, 0, 3, 2])
+ simd_shuffle!(a, a, [1, 0, 3, 2])
}
/// Reversing vector elements (swap endianness)
@@ -6530,7 +6530,7 @@ pub unsafe fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t {
- simd_shuffle2!(a, a, [1, 0])
+ simd_shuffle!(a, a, [1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6544,7 +6544,7 @@ pub unsafe fn vrev64_f32(a: float32x2_t) -> float32x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
- simd_shuffle4!(a, a, [1, 0, 3, 2])
+ simd_shuffle!(a, a, [1, 0, 3, 2])
}
/// Reversing vector elements (swap endianness)
@@ -6558,7 +6558,7 @@ pub unsafe fn vrev64q_f32(a: float32x4_t) -> float32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t {
- simd_shuffle8!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
+ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6572,7 +6572,7 @@ pub unsafe fn vrev64_p8(a: poly8x8_t) -> poly8x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t {
- simd_shuffle16!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
+ simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8])
}
/// Reversing vector elements (swap endianness)
@@ -6586,7 +6586,7 @@ pub unsafe fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t {
- simd_shuffle4!(a, a, [3, 2, 1, 0])
+ simd_shuffle!(a, a, [3, 2, 1, 0])
}
/// Reversing vector elements (swap endianness)
@@ -6600,7 +6600,7 @@ pub unsafe fn vrev64_p16(a: poly16x4_t) -> poly16x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t {
- simd_shuffle8!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
+ simd_shuffle!(a, a, [3, 2, 1, 0, 7, 6, 5, 4])
}
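
The vrevN_* family reverses lanes within each N-bit container, which is where the index patterns above come from: vrev32 over 16-bit lanes is [1, 0, 3, 2], vrev64 over 16-bit lanes is [3, 2, 1, 0]. A scalar sketch:

    // Swap the two 16-bit lanes inside every 32-bit chunk.
    fn rev32_u16_model(a: [u16; 4]) -> [u16; 4] {
        [a[1], a[0], a[3], a[2]]
    }
    // Reverse all four 16-bit lanes inside the single 64-bit chunk.
    fn rev64_u16_model(a: [u16; 4]) -> [u16; 4] {
        [a[3], a[2], a[1], a[0]]
    }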
/// Signed Add and Accumulate Long Pairwise.
@@ -6922,7 +6922,7 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t {
- simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
*/
@@ -6933,7 +6933,7 @@ pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t {
- simd_shuffle4!(low, high, [0, 1, 2, 3])
+ simd_shuffle!(low, high, [0, 1, 2, 3])
}
/// Vector combine
@@ -6943,7 +6943,7 @@ pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t {
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t {
- simd_shuffle16!(
+ simd_shuffle!(
low,
high,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
@@ -6957,7 +6957,7 @@ pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t {
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t {
- simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Vector combine
@@ -6970,7 +6970,7 @@ pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t {
- simd_shuffle16!(
+ simd_shuffle!(
low,
high,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
@@ -6987,7 +6987,7 @@ pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t {
- simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Vector combine
@@ -7000,7 +7000,7 @@ pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t {
- simd_shuffle4!(low, high, [0, 1, 2, 3])
+ simd_shuffle!(low, high, [0, 1, 2, 3])
}
/// Vector combine
@@ -7013,7 +7013,7 @@ pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t {
- simd_shuffle2!(low, high, [0, 1])
+ simd_shuffle!(low, high, [0, 1])
}
/// Vector combine
@@ -7026,7 +7026,7 @@ pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t {
- simd_shuffle16!(
+ simd_shuffle!(
low,
high,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
@@ -7043,7 +7043,7 @@ pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t {
- simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
/// Vector combine
@@ -7057,7 +7057,7 @@ pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t {
- simd_shuffle4!(low, high, [0, 1, 2, 3])
+ simd_shuffle!(low, high, [0, 1, 2, 3])
}
/// Vector combine
@@ -7070,7 +7070,7 @@ pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t {
- simd_shuffle2!(low, high, [0, 1])
+ simd_shuffle!(low, high, [0, 1])
}
/// Vector combine
@@ -7083,7 +7083,7 @@ pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t {
stable(feature = "neon_intrinsics", since = "1.59.0")
)]
pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t {
- simd_shuffle2!(low, high, [0, 1])
+ simd_shuffle!(low, high, [0, 1])
}
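
vcombine_* is plain lane concatenation: the identity index list over the pair (low, high) selects low followed by high. A scalar sketch:

    // Model of vcombine_u32: the [0, 1, 2, 3] shuffle over (low, high)
    // is exactly low ++ high.
    fn combine_u32_model(low: [u32; 2], high: [u32; 2]) -> [u32; 4] {
        [low[0], low[1], high[0], high[1]]
    }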
#[cfg(test)]
diff --git a/library/stdarch/crates/core_arch/src/core_arch_docs.md b/library/stdarch/crates/core_arch/src/core_arch_docs.md
index eddd1fc0c..c250fc3d3 100644
--- a/library/stdarch/crates/core_arch/src/core_arch_docs.md
+++ b/library/stdarch/crates/core_arch/src/core_arch_docs.md
@@ -47,7 +47,7 @@ the AVX2 feature as [documented by Intel][intel-dox] so to correctly call
this function we need to (a) guarantee we only call it on `x86`/`x86_64`
and (b) ensure that the CPU feature is available
-[intel-dox]: https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_epi64&expand=100
+[intel-dox]: https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_epi64&expand=100
## Static CPU Feature Detection
diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs
index 5a9727a0a..023947b83 100644
--- a/library/stdarch/crates/core_arch/src/lib.rs
+++ b/library/stdarch/crates/core_arch/src/lib.rs
@@ -21,18 +21,18 @@
sse4a_target_feature,
riscv_target_feature,
arm_target_feature,
- cmpxchg16b_target_feature,
avx512_target_feature,
mips_target_feature,
powerpc_target_feature,
wasm_target_feature,
abi_unadjusted,
rtm_target_feature,
- f16c_target_feature,
allow_internal_unstable,
decl_macro,
asm_const,
- target_feature_11
+ target_feature_11,
+ inline_const,
+ generic_arg_infer
)]
#![cfg_attr(test, feature(test, abi_vectorcall))]
#![deny(clippy::missing_inline_in_public_items)]
diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs
index 1c917c52b..0c86a24ca 100644
--- a/library/stdarch/crates/core_arch/src/macros.rs
+++ b/library/stdarch/crates/core_arch/src/macros.rs
@@ -1,84 +1,54 @@
//! Utility macros.
-// Helper struct used to trigger const eval errors when the const generic immediate value `IMM` is
-// out of `[MIN-MAX]` range.
-pub(crate) struct ValidateConstImm<const IMM: i32, const MIN: i32, const MAX: i32>;
-impl<const IMM: i32, const MIN: i32, const MAX: i32> ValidateConstImm<IMM, MIN, MAX> {
- pub(crate) const VALID: () = {
- assert!(IMM >= MIN && IMM <= MAX, "IMM value not in expected range");
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm1 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 1) - 1 }>::VALID;
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm2 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 2) - 1 }>::VALID;
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm3 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 3) - 1 }>::VALID;
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm4 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 4) - 1 }>::VALID;
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm5 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 5) - 1 }>::VALID;
+#[allow(unused)]
+macro_rules! static_assert {
+ ($e:expr) => {
+ const {
+ assert!($e);
+ }
};
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm6 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 6) - 1 }>::VALID;
+ ($e:expr, $msg:expr) => {
+ const {
+ assert!($e, $msg);
+ }
};
}
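
The rewritten static_assert! leans on inline const blocks, which are evaluated once per monomorphization and can read the enclosing function's const generics directly; that is what lets a single macro replace the per-width ValidateConstImm helpers deleted above. A standalone sketch of the two styles (function and struct names hypothetical):

    // Old style: force evaluation of an associated const on a helper struct.
    fn old_style<const N: i32>() {
        struct Validate<const N: i32>;
        impl<const N: i32> Validate<N> {
            const VALID: () = { assert!(N >= 0, "N out of range"); };
        }
        let _ = Validate::<N>::VALID; // error surfaces when instantiated
    }
    // New style: an inline const block performs the same check directly.
    fn new_style<const N: i32>() {
        const { assert!(N >= 0, "N out of range") };
    }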
#[allow(unused_macros)]
-macro_rules! static_assert_imm8 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 8) - 1 }>::VALID;
+macro_rules! static_assert_uimm_bits {
+ ($imm:ident, $bits:expr) => {
+ // `0 <= $imm` produces a warning if the immediate has an unsigned type
+ #[allow(unused_comparisons)]
+ {
+ static_assert!(
+ 0 <= $imm && $imm <= (1 << $bits) - 1,
+ concat!(
+ stringify!($imm),
+ " doesn't fit in ",
+ stringify!($bits),
+ " bits",
+ )
+ )
+ }
};
}
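
A standalone sketch of what a static_assert_uimm_bits!(IMM, 2) call boils down to (function name hypothetical):

    // A 2-bit unsigned immediate accepts 0..=3.
    fn lane<const IMM: i32>() -> u32 {
        const { assert!(0 <= IMM && IMM <= (1 << 2) - 1, "IMM doesn't fit in 2 bits") };
        IMM as u32
    }
    // lane::<3>() compiles; lane::<4>() is rejected during const evaluation.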
#[allow(unused_macros)]
-macro_rules! static_assert_imm16 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, 0, { (1 << 16) - 1 }>::VALID;
+macro_rules! static_assert_simm_bits {
+ ($imm:ident, $bits:expr) => {
+ static_assert!(
+ (-1 << ($bits - 1)) - 1 < $imm && $imm <= (1 << ($bits - 1)) - 1,
+ concat!(
+ stringify!($imm),
+ " doesn't fit in ",
+ stringify!($bits),
+ " bits",
+ )
+ )
};
}
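
Likewise for the signed form: static_assert_simm_bits!(IMM_S5, 5) pins the immediate to -16..=15 (note the strict `<` against min - 1 for the lower bound). A standalone sketch (function name hypothetical):

    // A signed 5-bit immediate accepts -16..=15.
    fn imm_s5<const IMM_S5: i32>() -> i32 {
        const { assert!(-16 <= IMM_S5 && IMM_S5 <= 15, "IMM_S5 doesn't fit in 5 bits") };
        IMM_S5
    }
    // imm_s5::<-16>() and imm_s5::<15>() compile; imm_s5::<16>() does not.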
#[allow(unused)]
-macro_rules! static_assert {
- ($imm:ident : $ty:ty where $e:expr) => {{
- struct Validate<const $imm: $ty>();
- impl<const $imm: $ty> Validate<$imm> {
- const VALID: () = {
- assert!($e, concat!("Assertion failed: ", stringify!($e)));
- };
- }
- let _ = Validate::<$imm>::VALID;
- }};
-}
-
-#[allow(unused)]
macro_rules! types {
($(
$(#[$doc:meta])*
@@ -94,97 +64,15 @@ macro_rules! types {
}
#[allow(unused)]
-macro_rules! simd_shuffle2 {
- ($x:expr, $y:expr, <$(const $imm:ident : $ty:ty),+ $(,)?> $idx:expr $(,)?) => {{
- struct ConstParam<$(const $imm: $ty),+>;
- impl<$(const $imm: $ty),+> ConstParam<$($imm),+> {
- const IDX: [u32; 2] = $idx;
- }
-
- simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
- }};
- ($x:expr, $y:expr, $idx:expr $(,)?) => {{
- const IDX: [u32; 2] = $idx;
- simd_shuffle($x, $y, IDX)
- }};
-}
-
-#[allow(unused_macros)]
-macro_rules! simd_shuffle4 {
- ($x:expr, $y:expr, <$(const $imm:ident : $ty:ty),+ $(,)?> $idx:expr $(,)?) => {{
- struct ConstParam<$(const $imm: $ty),+>;
- impl<$(const $imm: $ty),+> ConstParam<$($imm),+> {
- const IDX: [u32; 4] = $idx;
- }
-
- simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
- }};
- ($x:expr, $y:expr, $idx:expr $(,)?) => {{
- const IDX: [u32; 4] = $idx;
- simd_shuffle($x, $y, IDX)
- }};
-}
-
-#[allow(unused_macros)]
-macro_rules! simd_shuffle8 {
- ($x:expr, $y:expr, <$(const $imm:ident : $ty:ty),+ $(,)?> $idx:expr $(,)?) => {{
- struct ConstParam<$(const $imm: $ty),+>;
- impl<$(const $imm: $ty),+> ConstParam<$($imm),+> {
- const IDX: [u32; 8] = $idx;
- }
-
- simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
- }};
- ($x:expr, $y:expr, $idx:expr $(,)?) => {{
- const IDX: [u32; 8] = $idx;
- simd_shuffle($x, $y, IDX)
- }};
-}
-
-#[allow(unused)]
-macro_rules! simd_shuffle16 {
- ($x:expr, $y:expr, <$(const $imm:ident : $ty:ty),+ $(,)?> $idx:expr $(,)?) => {{
- struct ConstParam<$(const $imm: $ty),+>;
- impl<$(const $imm: $ty),+> ConstParam<$($imm),+> {
- const IDX: [u32; 16] = $idx;
- }
-
- simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
- }};
- ($x:expr, $y:expr, $idx:expr $(,)?) => {{
- const IDX: [u32; 16] = $idx;
- simd_shuffle($x, $y, IDX)
- }};
-}
-
-#[allow(unused_macros)]
-macro_rules! simd_shuffle32 {
- ($x:expr, $y:expr, <$(const $imm:ident : $ty:ty),+> $(,)? $idx:expr $(,)?) => {{
- struct ConstParam<$(const $imm: $ty),+>;
- impl<$(const $imm: $ty),+> ConstParam<$($imm),+> {
- const IDX: [u32; 32] = $idx;
- }
-
- simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
- }};
- ($x:expr, $y:expr, $idx:expr $(,)?) => {{
- const IDX: [u32; 32] = $idx;
- simd_shuffle($x, $y, IDX)
- }};
-}
-
-#[allow(unused_macros)]
-macro_rules! simd_shuffle64 {
- ($x:expr, $y:expr, <$(const $imm:ident : $ty:ty),+ $(,)?> $idx:expr $(,)?) => {{
- struct ConstParam<$(const $imm: $ty),+>;
- impl<$(const $imm: $ty),+> ConstParam<$($imm),+> {
- const IDX: [u32; 64] = $idx;
- }
-
- simd_shuffle($x, $y, ConstParam::<$($imm),+>::IDX)
- }};
+macro_rules! simd_shuffle {
($x:expr, $y:expr, $idx:expr $(,)?) => {{
- const IDX: [u32; 64] = $idx;
- simd_shuffle($x, $y, IDX)
+ simd_shuffle(
+ $x,
+ $y,
+ const {
+ let v: [u32; _] = $idx;
+ v
+ },
+ )
}};
}
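
The unified simd_shuffle! works at every width because of generic_arg_infer, enabled in lib.rs above: the `_` in `[u32; _]` lets the index array's length be inferred from the literal. A nightly-only sketch of just the inference:

    #![feature(generic_arg_infer)] // crate-root attribute, nightly only
    fn demo() {
        let idx: [u32; _] = [1, 0, 3, 2]; // length inferred as 4
        assert_eq!(idx.len(), 4);
    }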
diff --git a/library/stdarch/crates/core_arch/src/mips/msa.rs b/library/stdarch/crates/core_arch/src/mips/msa.rs
index cded73a54..3e93db85e 100644
--- a/library/stdarch/crates/core_arch/src/mips/msa.rs
+++ b/library/stdarch/crates/core_arch/src/mips/msa.rs
@@ -10,9 +10,6 @@ use stdarch_test::assert_instr;
use crate::mem;
-#[macro_use]
-mod macros;
-
types! {
// / MIPS-specific 128-bit wide vector of 16 packed `i8`.
pub struct v16i8(
@@ -1413,7 +1410,7 @@ pub unsafe fn __msa_addv_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(addvi.b, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_addvi_b<const IMM5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_addvi_b(a, IMM5)
}
@@ -1428,7 +1425,7 @@ pub unsafe fn __msa_addvi_b<const IMM5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(addvi.h, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_addvi_h<const IMM5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_addvi_h(a, IMM5)
}
@@ -1443,7 +1440,7 @@ pub unsafe fn __msa_addvi_h<const IMM5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(addvi.w, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_addvi_w<const IMM5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_addvi_w(a, IMM5)
}
@@ -1458,7 +1455,7 @@ pub unsafe fn __msa_addvi_w<const IMM5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(addvi.d, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_addvi_d<const IMM5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_addvi_d(a, IMM5)
}
@@ -1487,7 +1484,7 @@ pub unsafe fn __msa_and_v(a: v16u8, b: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(andi.b, imm8 = 0b10010111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_andi_b<const IMM8: i32>(a: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_andi_b(a, IMM8)
}
@@ -1910,7 +1907,7 @@ pub unsafe fn __msa_bclr_d(a: v2u64, b: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(bclri.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bclri_b<const IMM3: i32>(a: v16u8) -> v16u8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_bclri_b(a, IMM3)
}
@@ -1925,7 +1922,7 @@ pub unsafe fn __msa_bclri_b<const IMM3: i32>(a: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(bclri.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bclri_h<const IMM4: i32>(a: v8u16) -> v8u16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_bclri_h(a, IMM4)
}
@@ -1940,7 +1937,7 @@ pub unsafe fn __msa_bclri_h<const IMM4: i32>(a: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(bclri.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bclri_w<const IMM5: i32>(a: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_bclri_w(a, IMM5)
}
@@ -1955,7 +1952,7 @@ pub unsafe fn __msa_bclri_w<const IMM5: i32>(a: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(bclri.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bclri_d<const IMM6: i32>(a: v2u64) -> v2u64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_bclri_d(a, IMM6)
}
@@ -2026,7 +2023,7 @@ pub unsafe fn __msa_binsl_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(binsli.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsli_b<const IMM3: i32>(a: v16u8, b: v16u8) -> v16u8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_binsli_b(a, mem::transmute(b), IMM3)
}
@@ -2041,7 +2038,7 @@ pub unsafe fn __msa_binsli_b<const IMM3: i32>(a: v16u8, b: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(binsli.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsli_h<const IMM4: i32>(a: v8u16, b: v8u16) -> v8u16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_binsli_h(a, mem::transmute(b), IMM4)
}
@@ -2056,7 +2053,7 @@ pub unsafe fn __msa_binsli_h<const IMM4: i32>(a: v8u16, b: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(binsli.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsli_w<const IMM5: i32>(a: v4u32, b: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_binsli_w(a, mem::transmute(b), IMM5)
}
@@ -2071,7 +2068,7 @@ pub unsafe fn __msa_binsli_w<const IMM5: i32>(a: v4u32, b: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(binsli.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsli_d<const IMM6: i32>(a: v2u64, b: v2u64) -> v2u64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_binsli_d(a, mem::transmute(b), IMM6)
}
@@ -2142,7 +2139,7 @@ pub unsafe fn __msa_binsr_d(a: v2u64, b: v2u64, c: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(binsri.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsri_b<const IMM3: i32>(a: v16u8, b: v16u8) -> v16u8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_binsri_b(a, mem::transmute(b), IMM3)
}
@@ -2157,7 +2154,7 @@ pub unsafe fn __msa_binsri_b<const IMM3: i32>(a: v16u8, b: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(binsri.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsri_h<const IMM4: i32>(a: v8u16, b: v8u16) -> v8u16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_binsri_h(a, mem::transmute(b), IMM4)
}
@@ -2172,7 +2169,7 @@ pub unsafe fn __msa_binsri_h<const IMM4: i32>(a: v8u16, b: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(binsri.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsri_w<const IMM5: i32>(a: v4u32, b: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_binsri_w(a, mem::transmute(b), IMM5)
}
@@ -2187,7 +2184,7 @@ pub unsafe fn __msa_binsri_w<const IMM5: i32>(a: v4u32, b: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(binsri.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_binsri_d<const IMM6: i32>(a: v2u64, b: v2u64) -> v2u64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_binsri_d(a, mem::transmute(b), IMM6)
}
@@ -2216,7 +2213,7 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(bmnzi.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_bmnzi_b<const IMM8: i32>(a: v16u8, b: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_bmnzi_b(a, mem::transmute(b), IMM8)
}
@@ -2245,7 +2242,7 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(bmzi.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_bmzi_b<const IMM8: i32>(a: v16u8, b: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_bmzi_b(a, mem::transmute(b), IMM8)
}
@@ -2316,7 +2313,7 @@ pub unsafe fn __msa_bneg_d(a: v2u64, b: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(bnegi.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bnegi_b<const IMM3: i32>(a: v16u8) -> v16u8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_bnegi_b(a, IMM3)
}
@@ -2331,7 +2328,7 @@ pub unsafe fn __msa_bnegi_b<const IMM3: i32>(a: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(bnegi.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bnegi_h<const IMM4: i32>(a: v8u16) -> v8u16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_bnegi_h(a, IMM4)
}
@@ -2346,7 +2343,7 @@ pub unsafe fn __msa_bnegi_h<const IMM4: i32>(a: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(bnegi.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bnegi_w<const IMM5: i32>(a: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_bnegi_w(a, IMM5)
}
@@ -2361,7 +2358,7 @@ pub unsafe fn __msa_bnegi_w<const IMM5: i32>(a: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(bnegi.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bnegi_d<const IMM6: i32>(a: v2u64) -> v2u64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_bnegi_d(a, IMM6)
}
@@ -2446,7 +2443,7 @@ pub unsafe fn __msa_bsel_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(bseli.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_bseli_b<const IMM8: i32>(a: v16u8, b: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_bseli_b(a, mem::transmute(b), IMM8)
}
@@ -2517,7 +2514,7 @@ pub unsafe fn __msa_bset_d(a: v2u64, b: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(bseti.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bseti_b<const IMM3: i32>(a: v16u8) -> v16u8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_bseti_b(a, IMM3)
}
@@ -2532,7 +2529,7 @@ pub unsafe fn __msa_bseti_b<const IMM3: i32>(a: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(bseti.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bseti_h<const IMM4: i32>(a: v8u16) -> v8u16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_bseti_h(a, IMM4)
}
@@ -2547,7 +2544,7 @@ pub unsafe fn __msa_bseti_h<const IMM4: i32>(a: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(bseti.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bseti_w<const IMM5: i32>(a: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_bseti_w(a, IMM5)
}
@@ -2562,7 +2559,7 @@ pub unsafe fn __msa_bseti_w<const IMM5: i32>(a: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(bseti.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_bseti_d<const IMM6: i32>(a: v2u64) -> v2u64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_bseti_d(a, IMM6)
}
@@ -2685,7 +2682,7 @@ pub unsafe fn __msa_ceq_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(ceqi.b, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ceqi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_ceqi_b(a, IMM_S5)
}
@@ -2700,7 +2697,7 @@ pub unsafe fn __msa_ceqi_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(ceqi.h, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ceqi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_ceqi_h(a, IMM_S5)
}
@@ -2715,7 +2712,7 @@ pub unsafe fn __msa_ceqi_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(ceqi.w, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ceqi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_ceqi_w(a, IMM_S5)
}
@@ -2730,7 +2727,7 @@ pub unsafe fn __msa_ceqi_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(ceqi.d, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ceqi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_ceqi_d(a, IMM_S5)
}
@@ -2744,7 +2741,7 @@ pub unsafe fn __msa_ceqi_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(cfcmsa, imm5 = 0b11111))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __msa_cfcmsa<const IMM5: i32>() -> i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_cfcmsa(IMM5)
}
@@ -2872,7 +2869,7 @@ pub unsafe fn __msa_cle_u_d(a: v2u64, b: v2u64) -> v2i64 {
#[cfg_attr(test, assert_instr(clei_s.b, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clei_s_b(a, IMM_S5)
}
@@ -2888,7 +2885,7 @@ pub unsafe fn __msa_clei_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(clei_s.h, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clei_s_h(a, IMM_S5)
}
@@ -2904,7 +2901,7 @@ pub unsafe fn __msa_clei_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(clei_s.w, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clei_s_w(a, IMM_S5)
}
@@ -2920,7 +2917,7 @@ pub unsafe fn __msa_clei_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(clei_s.d, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clei_s_d(a, IMM_S5)
}
@@ -2936,7 +2933,7 @@ pub unsafe fn __msa_clei_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(clei_u.b, imm5 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_u_b<const IMM5: i32>(a: v16u8) -> v16i8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clei_u_b(a, IMM5)
}
@@ -2952,7 +2949,7 @@ pub unsafe fn __msa_clei_u_b<const IMM5: i32>(a: v16u8) -> v16i8 {
#[cfg_attr(test, assert_instr(clei_u.h, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_u_h<const IMM5: i32>(a: v8u16) -> v8i16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clei_u_h(a, IMM5)
}
@@ -2968,7 +2965,7 @@ pub unsafe fn __msa_clei_u_h<const IMM5: i32>(a: v8u16) -> v8i16 {
#[cfg_attr(test, assert_instr(clei_u.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_u_w<const IMM5: i32>(a: v4u32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clei_u_w(a, IMM5)
}
@@ -2984,7 +2981,7 @@ pub unsafe fn __msa_clei_u_w<const IMM5: i32>(a: v4u32) -> v4i32 {
#[cfg_attr(test, assert_instr(clei_u.d, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clei_u_d<const IMM5: i32>(a: v2u64) -> v2i64 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clei_u_d(a, IMM5)
}
@@ -3112,7 +3109,7 @@ pub unsafe fn __msa_clt_u_d(a: v2u64, b: v2u64) -> v2i64 {
#[cfg_attr(test, assert_instr(clti_s.b, imm_s5 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clti_s_b(a, IMM_S5)
}
@@ -3128,7 +3125,7 @@ pub unsafe fn __msa_clti_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(clti_s.h, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clti_s_h(a, IMM_S5)
}
@@ -3144,7 +3141,7 @@ pub unsafe fn __msa_clti_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(clti_s.w, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clti_s_w(a, IMM_S5)
}
@@ -3160,7 +3157,7 @@ pub unsafe fn __msa_clti_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(clti_s.d, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_clti_s_d(a, IMM_S5)
}
@@ -3176,7 +3173,7 @@ pub unsafe fn __msa_clti_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(clti_u.b, imm5 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_u_b<const IMM5: i32>(a: v16u8) -> v16i8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clti_u_b(a, IMM5)
}
@@ -3192,7 +3189,7 @@ pub unsafe fn __msa_clti_u_b<const IMM5: i32>(a: v16u8) -> v16i8 {
#[cfg_attr(test, assert_instr(clti_u.h, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_u_h<const IMM5: i32>(a: v8u16) -> v8i16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clti_u_h(a, IMM5)
}
@@ -3208,7 +3205,7 @@ pub unsafe fn __msa_clti_u_h<const IMM5: i32>(a: v8u16) -> v8i16 {
#[cfg_attr(test, assert_instr(clti_u.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_u_w<const IMM5: i32>(a: v4u32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clti_u_w(a, IMM5)
}
@@ -3224,7 +3221,7 @@ pub unsafe fn __msa_clti_u_w<const IMM5: i32>(a: v4u32) -> v4i32 {
#[cfg_attr(test, assert_instr(clti_u.d, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_clti_u_d<const IMM5: i32>(a: v2u64) -> v2i64 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_clti_u_d(a, IMM5)
}
@@ -3238,7 +3235,7 @@ pub unsafe fn __msa_clti_u_d<const IMM5: i32>(a: v2u64) -> v2i64 {
#[cfg_attr(test, assert_instr(copy_s.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_s_b<const IMM4: i32>(a: v16i8) -> i32 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_copy_s_b(a, IMM4)
}
@@ -3252,7 +3249,7 @@ pub unsafe fn __msa_copy_s_b<const IMM4: i32>(a: v16i8) -> i32 {
#[cfg_attr(test, assert_instr(copy_s.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_s_h<const IMM3: i32>(a: v8i16) -> i32 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_copy_s_h(a, IMM3)
}
@@ -3266,7 +3263,7 @@ pub unsafe fn __msa_copy_s_h<const IMM3: i32>(a: v8i16) -> i32 {
#[cfg_attr(test, assert_instr(copy_s.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_s_w<const IMM2: i32>(a: v4i32) -> i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_copy_s_w(a, IMM2)
}
@@ -3280,7 +3277,7 @@ pub unsafe fn __msa_copy_s_w<const IMM2: i32>(a: v4i32) -> i32 {
#[cfg_attr(test, assert_instr(copy_s.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_s_d<const IMM1: i32>(a: v2i64) -> i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_copy_s_d(a, IMM1)
}
@@ -3294,7 +3291,7 @@ pub unsafe fn __msa_copy_s_d<const IMM1: i32>(a: v2i64) -> i64 {
#[cfg_attr(test, assert_instr(copy_u.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_u_b<const IMM4: i32>(a: v16i8) -> u32 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_copy_u_b(a, IMM4)
}
@@ -3308,7 +3305,7 @@ pub unsafe fn __msa_copy_u_b<const IMM4: i32>(a: v16i8) -> u32 {
#[cfg_attr(test, assert_instr(copy_u.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_u_h<const IMM3: i32>(a: v8i16) -> u32 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_copy_u_h(a, IMM3)
}
@@ -3322,7 +3319,7 @@ pub unsafe fn __msa_copy_u_h<const IMM3: i32>(a: v8i16) -> u32 {
#[cfg_attr(test, assert_instr(copy_u.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_u_w<const IMM2: i32>(a: v4i32) -> u32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_copy_u_w(a, IMM2)
}
@@ -3336,7 +3333,7 @@ pub unsafe fn __msa_copy_u_w<const IMM2: i32>(a: v4i32) -> u32 {
#[cfg_attr(test, assert_instr(copy_u.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_copy_u_d<const IMM1: i32>(a: v2i64) -> u64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_copy_u_d(a, IMM1)
}
@@ -3352,7 +3349,7 @@ pub unsafe fn __msa_copy_u_d<const IMM1: i32>(a: v2i64) -> u64 {
#[cfg_attr(test, assert_instr(ctcmsa, imm5 = 0b1))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __msa_ctcmsa<const IMM5: i32>(a: i32) -> () {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_ctcmsa(IMM5, a)
}
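
The pattern in the hunks above is uniform: each width-specific assertion macro becomes a call to one of two generic macros that take the bit width as a second argument. A minimal sketch of what such macros can look like (assuming an inline-const implementation; the real stdarch macros may differ):

// Sketch only, not the stdarch source: generic compile-time range checks
// for unsigned and signed N-bit immediates.
macro_rules! static_assert_uimm_bits {
    ($imm:ident, $bits:expr) => {
        // An unsigned N-bit immediate lies in 0 ..= 2^N - 1.
        const { assert!($imm >= 0 && $imm < (1 << $bits)) }
    };
}
macro_rules! static_assert_simm_bits {
    ($imm:ident, $bits:expr) => {
        // A signed N-bit immediate lies in -2^(N-1) ..= 2^(N-1) - 1.
        const { assert!($imm >= -(1 << ($bits - 1)) && $imm < (1 << ($bits - 1))) }
    };
}

fn demo<const IMM5: i32>() -> i32 {
    static_assert_uimm_bits!(IMM5, 5); // accepts 0..=31, rejects anything else
    IMM5
}

Calling `demo::<32>()` then fails when the constant is evaluated at compile time, which is the same behavior the old per-width macros provided.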
@@ -5568,7 +5565,7 @@ pub unsafe fn __msa_ilvr_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(insert.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insert_b<const IMM4: i32>(a: v16i8, c: i32) -> v16i8 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_insert_b(a, IMM4, c)
}
@@ -5583,7 +5580,7 @@ pub unsafe fn __msa_insert_b<const IMM4: i32>(a: v16i8, c: i32) -> v16i8 {
#[cfg_attr(test, assert_instr(insert.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insert_h<const IMM3: i32>(a: v8i16, c: i32) -> v8i16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_insert_h(a, IMM3, c)
}
@@ -5598,7 +5595,7 @@ pub unsafe fn __msa_insert_h<const IMM3: i32>(a: v8i16, c: i32) -> v8i16 {
#[cfg_attr(test, assert_instr(insert.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insert_w<const IMM2: i32>(a: v4i32, c: i32) -> v4i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_insert_w(a, IMM2, c)
}
@@ -5613,7 +5610,7 @@ pub unsafe fn __msa_insert_w<const IMM2: i32>(a: v4i32, c: i32) -> v4i32 {
#[cfg_attr(test, assert_instr(insert.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insert_d<const IMM1: i32>(a: v2i64, c: i64) -> v2i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_insert_d(a, IMM1, c)
}
@@ -5628,7 +5625,7 @@ pub unsafe fn __msa_insert_d<const IMM1: i32>(a: v2i64, c: i64) -> v2i64 {
#[cfg_attr(test, assert_instr(insve.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insve_b<const IMM4: i32>(a: v16i8, c: v16i8) -> v16i8 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_insve_b(a, IMM4, c)
}
@@ -5643,7 +5640,7 @@ pub unsafe fn __msa_insve_b<const IMM4: i32>(a: v16i8, c: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(insve.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insve_h<const IMM3: i32>(a: v8i16, c: v8i16) -> v8i16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_insve_h(a, IMM3, c)
}
@@ -5658,7 +5655,7 @@ pub unsafe fn __msa_insve_h<const IMM3: i32>(a: v8i16, c: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(insve.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insve_w<const IMM2: i32>(a: v4i32, c: v4i32) -> v4i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_insve_w(a, IMM2, c)
}
@@ -5673,7 +5670,7 @@ pub unsafe fn __msa_insve_w<const IMM2: i32>(a: v4i32, c: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(insve.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_insve_d<const IMM1: i32>(a: v2i64, c: v2i64) -> v2i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_insve_d(a, IMM1, c)
}
@@ -5688,7 +5685,7 @@ pub unsafe fn __msa_insve_d<const IMM1: i32>(a: v2i64, c: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(ld.b, imm_s10 = 0b1111111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ld_b<const IMM_S10: i32>(mem_addr: *mut u8) -> v16i8 {
- static_assert_imm_s10!(IMM_S10);
+ static_assert_simm_bits!(IMM_S10, 10);
msa_ld_b(mem_addr, IMM_S10)
}
@@ -5703,8 +5700,8 @@ pub unsafe fn __msa_ld_b<const IMM_S10: i32>(mem_addr: *mut u8) -> v16i8 {
#[cfg_attr(test, assert_instr(ld.h, imm_s11 = 0b11111111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ld_h<const IMM_S11: i32>(mem_addr: *mut u8) -> v8i16 {
- static_assert_imm_s11!(IMM_S11);
- static_assert!(IMM_S11: i32 where IMM_S11 % 2 == 0);
+ static_assert_simm_bits!(IMM_S11, 11);
+ static_assert!(IMM_S11 % 2 == 0);
msa_ld_h(mem_addr, IMM_S11)
}
@@ -5719,8 +5716,8 @@ pub unsafe fn __msa_ld_h<const IMM_S11: i32>(mem_addr: *mut u8) -> v8i16 {
#[cfg_attr(test, assert_instr(ld.w, imm_s12 = 0b111111111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ld_w<const IMM_S12: i32>(mem_addr: *mut u8) -> v4i32 {
- static_assert_imm_s12!(IMM_S12);
- static_assert!(IMM_S12: i32 where IMM_S12 % 4 == 0);
+ static_assert_simm_bits!(IMM_S12, 12);
+ static_assert!(IMM_S12 % 4 == 0);
msa_ld_w(mem_addr, IMM_S12)
}
@@ -5735,8 +5732,8 @@ pub unsafe fn __msa_ld_w<const IMM_S12: i32>(mem_addr: *mut u8) -> v4i32 {
#[cfg_attr(test, assert_instr(ld.d, imm_s13 = 0b1111111111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ld_d<const IMM_S13: i32>(mem_addr: *mut u8) -> v2i64 {
- static_assert_imm_s13!(IMM_S13);
- static_assert!(IMM_S13: i32 where IMM_S13 % 8 == 0);
+ static_assert_simm_bits!(IMM_S13, 13);
+ static_assert!(IMM_S13 % 8 == 0);
msa_ld_d(mem_addr, IMM_S13)
}
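
For the load intrinsics the immediate carries two constraints at once: it must fit the signed bit width and be a multiple of the element size. A small illustration of the legal offset set for `ld.w` (hypothetical helper mirroring the two `static_assert!`s above):

// Hypothetical helper: a signed 12-bit byte offset that is also
// 4-byte aligned, as __msa_ld_w requires.
const fn valid_ld_w_offset(imm: i32) -> bool {
    imm >= -2048 && imm <= 2047 && imm % 4 == 0
}

const _: () = assert!(valid_ld_w_offset(-2048));
const _: () = assert!(valid_ld_w_offset(2044));
const _: () = assert!(!valid_ld_w_offset(2045)); // in range, but misaligned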
@@ -5751,7 +5748,7 @@ pub unsafe fn __msa_ld_d<const IMM_S13: i32>(mem_addr: *mut u8) -> v2i64 {
#[cfg_attr(test, assert_instr(ldi.b, imm_s10 = 0b1111111111))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __msa_ldi_b<const IMM_S10: i32>() -> v16i8 {
- static_assert_imm_s10!(IMM_S10);
+ static_assert_simm_bits!(IMM_S10, 10);
msa_ldi_b(IMM_S10)
}
@@ -5766,7 +5763,7 @@ pub unsafe fn __msa_ldi_b<const IMM_S10: i32>() -> v16i8 {
#[cfg_attr(test, assert_instr(ldi.h, imm_s10 = 0b1111111111))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __msa_ldi_h<const IMM_S10: i32>() -> v8i16 {
- static_assert_imm_s10!(IMM_S10);
+ static_assert_simm_bits!(IMM_S10, 10);
msa_ldi_h(IMM_S10)
}
@@ -5781,7 +5778,7 @@ pub unsafe fn __msa_ldi_h<const IMM_S10: i32>() -> v8i16 {
#[cfg_attr(test, assert_instr(ldi.w, imm_s10 = 0b1111111111))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __msa_ldi_w<const IMM_S10: i32>() -> v4i32 {
- static_assert_imm_s10!(IMM_S10);
+ static_assert_simm_bits!(IMM_S10, 10);
msa_ldi_w(IMM_S10)
}
@@ -5796,7 +5793,7 @@ pub unsafe fn __msa_ldi_w<const IMM_S10: i32>() -> v4i32 {
#[cfg_attr(test, assert_instr(ldi.d, imm_s10 = 0b1111111111))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn __msa_ldi_d<const IMM_S10: i32>() -> v2i64 {
- static_assert_imm_s10!(IMM_S10);
+ static_assert_simm_bits!(IMM_S10, 10);
msa_ldi_d(IMM_S10)
}
@@ -6087,7 +6084,7 @@ pub unsafe fn __msa_max_u_d(a: v2u64, b: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(maxi_s.b, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_maxi_s_b(a, IMM_S5)
}
@@ -6102,7 +6099,7 @@ pub unsafe fn __msa_maxi_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(maxi_s.h, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_maxi_s_h(a, IMM_S5)
}
@@ -6117,7 +6114,7 @@ pub unsafe fn __msa_maxi_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(maxi_s.w, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_maxi_s_w(a, IMM_S5)
}
@@ -6132,7 +6129,7 @@ pub unsafe fn __msa_maxi_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(maxi_s.d, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_maxi_s_d(a, IMM_S5)
}
@@ -6147,7 +6144,7 @@ pub unsafe fn __msa_maxi_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(maxi_u.b, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_u_b<const IMM5: i32>(a: v16u8) -> v16u8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_maxi_u_b(a, IMM5)
}
@@ -6162,7 +6159,7 @@ pub unsafe fn __msa_maxi_u_b<const IMM5: i32>(a: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(maxi_u.h, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_u_h<const IMM5: i32>(a: v8u16) -> v8u16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_maxi_u_h(a, IMM5)
}
@@ -6177,7 +6174,7 @@ pub unsafe fn __msa_maxi_u_h<const IMM5: i32>(a: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(maxi_u.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_u_w<const IMM5: i32>(a: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_maxi_u_w(a, IMM5)
}
@@ -6192,7 +6189,7 @@ pub unsafe fn __msa_maxi_u_w<const IMM5: i32>(a: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(maxi_u.d, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_maxi_u_d<const IMM5: i32>(a: v2u64) -> v2u64 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_maxi_u_d(a, IMM5)
}
@@ -6315,7 +6312,7 @@ pub unsafe fn __msa_min_s_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(mini_s.b, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_mini_s_b(a, IMM_S5)
}
@@ -6330,7 +6327,7 @@ pub unsafe fn __msa_mini_s_b<const IMM_S5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(mini_s.h, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_mini_s_h(a, IMM_S5)
}
@@ -6345,7 +6342,7 @@ pub unsafe fn __msa_mini_s_h<const IMM_S5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(mini_s.w, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_mini_s_w(a, IMM_S5)
}
@@ -6360,7 +6357,7 @@ pub unsafe fn __msa_mini_s_w<const IMM_S5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(mini_s.d, imm_s5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_s_d<const IMM_S5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm_s5!(IMM_S5);
+ static_assert_simm_bits!(IMM_S5, 5);
msa_mini_s_d(a, IMM_S5)
}
@@ -6427,7 +6424,7 @@ pub unsafe fn __msa_min_u_d(a: v2u64, b: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(mini_u.b, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_u_b<const IMM5: i32>(a: v16u8) -> v16u8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_mini_u_b(a, IMM5)
}
@@ -6442,7 +6439,7 @@ pub unsafe fn __msa_mini_u_b<const IMM5: i32>(a: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(mini_u.h, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_u_h<const IMM5: i32>(a: v8u16) -> v8u16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_mini_u_h(a, IMM5)
}
@@ -6457,7 +6454,7 @@ pub unsafe fn __msa_mini_u_h<const IMM5: i32>(a: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(mini_u.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_u_w<const IMM5: i32>(a: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_mini_u_w(a, IMM5)
}
@@ -6472,7 +6469,7 @@ pub unsafe fn __msa_mini_u_w<const IMM5: i32>(a: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(mini_u.d, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_mini_u_d<const IMM5: i32>(a: v2u64) -> v2u64 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_mini_u_d(a, IMM5)
}
@@ -6958,7 +6955,7 @@ pub unsafe fn __msa_nor_v(a: v16u8, b: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(nori.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_nori_b<const IMM8: i32>(a: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_nori_b(a, IMM8)
}
@@ -6988,7 +6985,7 @@ pub unsafe fn __msa_or_v(a: v16u8, b: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(ori.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_ori_b<const IMM8: i32>(a: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_ori_b(a, IMM8)
}
@@ -7155,7 +7152,7 @@ pub unsafe fn __msa_pcnt_d(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(sat_s.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_s_b<const IMM3: i32>(a: v16i8) -> v16i8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_sat_s_b(a, IMM3)
}
@@ -7170,7 +7167,7 @@ pub unsafe fn __msa_sat_s_b<const IMM3: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(sat_s.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_s_h<const IMM4: i32>(a: v8i16) -> v8i16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_sat_s_h(a, IMM4)
}
@@ -7185,7 +7182,7 @@ pub unsafe fn __msa_sat_s_h<const IMM4: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(sat_s.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_s_w<const IMM5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_sat_s_w(a, IMM5)
}
@@ -7200,7 +7197,7 @@ pub unsafe fn __msa_sat_s_w<const IMM5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(sat_s.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_s_d<const IMM6: i32>(a: v2i64) -> v2i64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_sat_s_d(a, IMM6)
}
@@ -7215,7 +7212,7 @@ pub unsafe fn __msa_sat_s_d<const IMM6: i32>(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(sat_u.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_u_b<const IMM3: i32>(a: v16u8) -> v16u8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_sat_u_b(a, IMM3)
}
@@ -7230,7 +7227,7 @@ pub unsafe fn __msa_sat_u_b<const IMM3: i32>(a: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(sat_u.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_u_h<const IMM4: i32>(a: v8u16) -> v8u16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_sat_u_h(a, IMM4)
}
@@ -7245,7 +7242,7 @@ pub unsafe fn __msa_sat_u_h<const IMM4: i32>(a: v8u16) -> v8u16 {
#[cfg_attr(test, assert_instr(sat_u.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_u_w<const IMM5: i32>(a: v4u32) -> v4u32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_sat_u_w(a, IMM5)
}
@@ -7260,7 +7257,7 @@ pub unsafe fn __msa_sat_u_w<const IMM5: i32>(a: v4u32) -> v4u32 {
#[cfg_attr(test, assert_instr(sat_u.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_sat_u_d<const IMM6: i32>(a: v2u64) -> v2u64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_sat_u_d(a, IMM6)
}
@@ -7276,7 +7273,7 @@ pub unsafe fn __msa_sat_u_d<const IMM6: i32>(a: v2u64) -> v2u64 {
#[cfg_attr(test, assert_instr(shf.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_shf_b<const IMM8: i32>(a: v16i8) -> v16i8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_shf_b(a, IMM8)
}
@@ -7292,7 +7289,7 @@ pub unsafe fn __msa_shf_b<const IMM8: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(shf.h, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_shf_h<const IMM8: i32>(a: v8i16) -> v8i16 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_shf_h(a, IMM8)
}
@@ -7308,7 +7305,7 @@ pub unsafe fn __msa_shf_h<const IMM8: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(shf.w, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_shf_w<const IMM8: i32>(a: v4i32) -> v4i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_shf_w(a, IMM8)
}
@@ -7408,7 +7405,7 @@ pub unsafe fn __msa_sld_d(a: v2i64, b: v2i64, c: i32) -> v2i64 {
#[cfg_attr(test, assert_instr(sldi.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_sldi_b<const IMM4: i32>(a: v16i8, b: v16i8) -> v16i8 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_sldi_b(a, mem::transmute(b), IMM4)
}
@@ -7428,7 +7425,7 @@ pub unsafe fn __msa_sldi_b<const IMM4: i32>(a: v16i8, b: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(sldi.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_sldi_h<const IMM3: i32>(a: v8i16, b: v8i16) -> v8i16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_sldi_h(a, mem::transmute(b), IMM3)
}
@@ -7448,7 +7445,7 @@ pub unsafe fn __msa_sldi_h<const IMM3: i32>(a: v8i16, b: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(sldi.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_sldi_w<const IMM2: i32>(a: v4i32, b: v4i32) -> v4i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_sldi_w(a, mem::transmute(b), IMM2)
}
@@ -7468,7 +7465,7 @@ pub unsafe fn __msa_sldi_w<const IMM2: i32>(a: v4i32, b: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(sldi.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_sldi_d<const IMM1: i32>(a: v2i64, b: v2i64) -> v2i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_sldi_d(a, mem::transmute(b), IMM1)
}
@@ -7539,7 +7536,7 @@ pub unsafe fn __msa_sll_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(slli.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_slli_b<const IMM4: i32>(a: v16i8) -> v16i8 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_slli_b(a, IMM4)
}
@@ -7554,7 +7551,7 @@ pub unsafe fn __msa_slli_b<const IMM4: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(slli.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_slli_h<const IMM3: i32>(a: v8i16) -> v8i16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_slli_h(a, IMM3)
}
@@ -7569,7 +7566,7 @@ pub unsafe fn __msa_slli_h<const IMM3: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(slli.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_slli_w<const IMM2: i32>(a: v4i32) -> v4i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_slli_w(a, IMM2)
}
@@ -7584,7 +7581,7 @@ pub unsafe fn __msa_slli_w<const IMM2: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(slli.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_slli_d<const IMM1: i32>(a: v2i64) -> v2i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_slli_d(a, IMM1)
}
@@ -7654,7 +7651,7 @@ pub unsafe fn __msa_splat_d(a: v2i64, b: i32) -> v2i64 {
#[cfg_attr(test, assert_instr(splati.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_splati_b<const IMM4: i32>(a: v16i8) -> v16i8 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_splati_b(a, IMM4)
}
@@ -7668,7 +7665,7 @@ pub unsafe fn __msa_splati_b<const IMM4: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(splati.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_splati_h<const IMM3: i32>(a: v8i16) -> v8i16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_splati_h(a, IMM3)
}
@@ -7682,7 +7679,7 @@ pub unsafe fn __msa_splati_h<const IMM3: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(splati.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_splati_w<const IMM2: i32>(a: v4i32) -> v4i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_splati_w(a, IMM2)
}
@@ -7696,7 +7693,7 @@ pub unsafe fn __msa_splati_w<const IMM2: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(splati.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_splati_d<const IMM1: i32>(a: v2i64) -> v2i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_splati_d(a, IMM1)
}
@@ -7767,7 +7764,7 @@ pub unsafe fn __msa_sra_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(srai.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srai_b<const IMM3: i32>(a: v16i8) -> v16i8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_srai_b(a, IMM3)
}
@@ -7782,7 +7779,7 @@ pub unsafe fn __msa_srai_b<const IMM3: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(srai.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srai_h<const IMM4: i32>(a: v8i16) -> v8i16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_srai_h(a, IMM4)
}
@@ -7797,7 +7794,7 @@ pub unsafe fn __msa_srai_h<const IMM4: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(srai.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srai_w<const IMM5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_srai_w(a, IMM5)
}
@@ -7812,7 +7809,7 @@ pub unsafe fn __msa_srai_w<const IMM5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(srai.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srai_d<const IMM6: i32>(a: v2i64) -> v2i64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_srai_d(a, IMM6)
}
@@ -7888,7 +7885,7 @@ pub unsafe fn __msa_srar_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(srari.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srari_b<const IMM3: i32>(a: v16i8) -> v16i8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_srari_b(a, IMM3)
}
@@ -7904,7 +7901,7 @@ pub unsafe fn __msa_srari_b<const IMM3: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(srari.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srari_h<const IMM4: i32>(a: v8i16) -> v8i16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_srari_h(a, IMM4)
}
@@ -7920,7 +7917,7 @@ pub unsafe fn __msa_srari_h<const IMM4: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(srari.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srari_w<const IMM5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_srari_w(a, IMM5)
}
@@ -7936,7 +7933,7 @@ pub unsafe fn __msa_srari_w<const IMM5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(srari.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srari_d<const IMM6: i32>(a: v2i64) -> v2i64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_srari_d(a, IMM6)
}
@@ -8007,7 +8004,7 @@ pub unsafe fn __msa_srl_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(srli.b, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srli_b<const IMM4: i32>(a: v16i8) -> v16i8 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_srli_b(a, IMM4)
}
@@ -8022,7 +8019,7 @@ pub unsafe fn __msa_srli_b<const IMM4: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(srli.h, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srli_h<const IMM3: i32>(a: v8i16) -> v8i16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_srli_h(a, IMM3)
}
@@ -8037,7 +8034,7 @@ pub unsafe fn __msa_srli_h<const IMM3: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(srli.w, imm2 = 0b11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srli_w<const IMM2: i32>(a: v4i32) -> v4i32 {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
msa_srli_w(a, IMM2)
}
@@ -8052,7 +8049,7 @@ pub unsafe fn __msa_srli_w<const IMM2: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(srli.d, imm1 = 0b1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srli_d<const IMM1: i32>(a: v2i64) -> v2i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
msa_srli_d(a, IMM1)
}
@@ -8128,7 +8125,7 @@ pub unsafe fn __msa_srlr_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(srlri.b, imm3 = 0b111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srlri_b<const IMM3: i32>(a: v16i8) -> v16i8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
msa_srlri_b(a, IMM3)
}
@@ -8144,7 +8141,7 @@ pub unsafe fn __msa_srlri_b<const IMM3: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(srlri.h, imm4 = 0b1111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srlri_h<const IMM4: i32>(a: v8i16) -> v8i16 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
msa_srlri_h(a, IMM4)
}
@@ -8160,7 +8157,7 @@ pub unsafe fn __msa_srlri_h<const IMM4: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(srlri.w, imm5 = 0b11111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srlri_w<const IMM5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_srlri_w(a, IMM5)
}
@@ -8176,7 +8173,7 @@ pub unsafe fn __msa_srlri_w<const IMM5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(srlri.d, imm6 = 0b111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_srlri_d<const IMM6: i32>(a: v2i64) -> v2i64 {
- static_assert_imm6!(IMM6);
+ static_assert_uimm_bits!(IMM6, 6);
msa_srlri_d(a, IMM6)
}
@@ -8191,7 +8188,7 @@ pub unsafe fn __msa_srlri_d<const IMM6: i32>(a: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(st.b, imm_s10 = 0b1111111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_st_b<const IMM_S10: i32>(a: v16i8, mem_addr: *mut u8) -> () {
- static_assert_imm_s10!(IMM_S10);
+ static_assert_simm_bits!(IMM_S10, 10);
msa_st_b(a, mem_addr, IMM_S10)
}
@@ -8206,8 +8203,8 @@ pub unsafe fn __msa_st_b<const IMM_S10: i32>(a: v16i8, mem_addr: *mut u8) -> ()
#[cfg_attr(test, assert_instr(st.h, imm_s11 = 0b11111111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_st_h<const IMM_S11: i32>(a: v8i16, mem_addr: *mut u8) -> () {
- static_assert_imm_s11!(IMM_S11);
- static_assert!(IMM_S11: i32 where IMM_S11 % 2 == 0);
+ static_assert_simm_bits!(IMM_S11, 11);
+ static_assert!(IMM_S11 % 2 == 0);
msa_st_h(a, mem_addr, IMM_S11)
}
@@ -8222,8 +8219,8 @@ pub unsafe fn __msa_st_h<const IMM_S11: i32>(a: v8i16, mem_addr: *mut u8) -> ()
#[cfg_attr(test, assert_instr(st.w, imm_s12 = 0b111111111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_st_w<const IMM_S12: i32>(a: v4i32, mem_addr: *mut u8) -> () {
- static_assert_imm_s12!(IMM_S12);
- static_assert!(IMM_S12: i32 where IMM_S12 % 4 == 0);
+ static_assert_simm_bits!(IMM_S12, 12);
+ static_assert!(IMM_S12 % 4 == 0);
msa_st_w(a, mem_addr, IMM_S12)
}
@@ -8238,8 +8235,8 @@ pub unsafe fn __msa_st_w<const IMM_S12: i32>(a: v4i32, mem_addr: *mut u8) -> ()
#[cfg_attr(test, assert_instr(st.d, imm_s13 = 0b1111111111111))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn __msa_st_d<const IMM_S13: i32>(a: v2i64, mem_addr: *mut u8) -> () {
- static_assert_imm_s13!(IMM_S13);
- static_assert!(IMM_S13: i32 where IMM_S13 % 8 == 0);
+ static_assert_simm_bits!(IMM_S13, 13);
+ static_assert!(IMM_S13 % 8 == 0);
msa_st_d(a, mem_addr, IMM_S13)
}
@@ -8530,7 +8527,7 @@ pub unsafe fn __msa_subv_d(a: v2i64, b: v2i64) -> v2i64 {
#[cfg_attr(test, assert_instr(subvi.b, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_subvi_b<const IMM5: i32>(a: v16i8) -> v16i8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_subvi_b(a, IMM5)
}
@@ -8545,7 +8542,7 @@ pub unsafe fn __msa_subvi_b<const IMM5: i32>(a: v16i8) -> v16i8 {
#[cfg_attr(test, assert_instr(subvi.h, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_subvi_h<const IMM5: i32>(a: v8i16) -> v8i16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_subvi_h(a, IMM5)
}
@@ -8560,7 +8557,7 @@ pub unsafe fn __msa_subvi_h<const IMM5: i32>(a: v8i16) -> v8i16 {
#[cfg_attr(test, assert_instr(subvi.w, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_subvi_w<const IMM5: i32>(a: v4i32) -> v4i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_subvi_w(a, IMM5)
}
@@ -8575,7 +8572,7 @@ pub unsafe fn __msa_subvi_w<const IMM5: i32>(a: v4i32) -> v4i32 {
#[cfg_attr(test, assert_instr(subvi.d, imm5 = 0b10111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_subvi_d<const IMM5: i32>(a: v2i64) -> v2i64 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
msa_subvi_d(a, IMM5)
}
@@ -8673,7 +8670,7 @@ pub unsafe fn __msa_xor_v(a: v16u8, b: v16u8) -> v16u8 {
#[cfg_attr(test, assert_instr(xori.b, imm8 = 0b11111111))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn __msa_xori_b<const IMM8: i32>(a: v16u8) -> v16u8 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
msa_xori_b(a, IMM8)
}
diff --git a/library/stdarch/crates/core_arch/src/mips/msa/macros.rs b/library/stdarch/crates/core_arch/src/mips/msa/macros.rs
deleted file mode 100644
index de8905840..000000000
--- a/library/stdarch/crates/core_arch/src/mips/msa/macros.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-//! Utility macros.
-
-macro_rules! static_assert_imm_s5 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -16, 15>::VALID;
- };
-}
-
-macro_rules! static_assert_imm_s10 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -512, 511>::VALID;
- };
-}
-
-macro_rules! static_assert_imm_s11 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -1024, 1023>::VALID;
- };
-}
-
-macro_rules! static_assert_imm_s12 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -2048, 2047>::VALID;
- };
-}
-
-macro_rules! static_assert_imm_s13 {
- ($imm:ident) => {
- let _ = $crate::core_arch::macros::ValidateConstImm::<$imm, -4096, 4095>::VALID;
- };
-}
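
The deleted macros all leaned on a `ValidateConstImm` helper. A reconstruction of how that pattern rejects out-of-range immediates (inferred from the usage above, not copied from stdarch):

// The associated const only fails to evaluate when it is actually used,
// so `let _ = ...::VALID;` turns an out-of-range immediate into a
// compile-time error at monomorphization.
struct ValidateConstImm<const IMM: i32, const MIN: i32, const MAX: i32>;

impl<const IMM: i32, const MIN: i32, const MAX: i32> ValidateConstImm<IMM, MIN, MAX> {
    const VALID: () = assert!(IMM >= MIN && IMM <= MAX, "immediate out of range");
}

fn check_simm5<const IMM_S5: i32>() {
    // What static_assert_imm_s5!(IMM_S5) expanded to:
    let _ = ValidateConstImm::<IMM_S5, -16, 15>::VALID;
}

The replacement macros express the same ranges as a bit width, which is why every `-16, 15` pair above becomes a `simm_bits` check with width 5.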
diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs
index 231d89e33..12a5b086c 100644
--- a/library/stdarch/crates/core_arch/src/mod.rs
+++ b/library/stdarch/crates/core_arch/src/mod.rs
@@ -255,8 +255,8 @@ pub mod arch {
/// Platform-specific intrinsics for the `NVPTX` platform.
///
/// See the [module documentation](../index.html) for more details.
- #[cfg(any(target_arch = "nvptx", target_arch = "nvptx64", doc))]
- #[doc(cfg(any(target_arch = "nvptx", target_arch = "nvptx64")))]
+ #[cfg(any(target_arch = "nvptx64", doc))]
+ #[doc(cfg(target_arch = "nvptx64"))]
#[unstable(feature = "stdsimd", issue = "27731")]
pub mod nvptx {
pub use crate::core_arch::nvptx::*;
@@ -299,6 +299,6 @@ mod powerpc;
#[doc(cfg(target_arch = "powerpc64"))]
mod powerpc64;
-#[cfg(any(target_arch = "nvptx", target_arch = "nvptx64", doc))]
-#[doc(cfg(any(target_arch = "nvptx", target_arch = "nvptx64")))]
+#[cfg(any(target_arch = "nvptx64", doc))]
+#[doc(cfg(target_arch = "nvptx64"))]
mod nvptx;
diff --git a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs
index 3141bc8bc..283a7e5ce 100644
--- a/library/stdarch/crates/core_arch/src/powerpc/vsx.rs
+++ b/library/stdarch/crates/core_arch/src/powerpc/vsx.rs
@@ -47,10 +47,10 @@ mod sealed {
#[cfg_attr(all(test, target_endian = "big"), assert_instr(xxspltd, dm = 0x0))]
unsafe fn xxpermdi(a: i64x2, b: i64x2, dm: u8) -> i64x2 {
match dm & 0b11 {
- 0 => simd_shuffle2!(a, b, [0b00, 0b10]),
- 1 => simd_shuffle2!(a, b, [0b01, 0b10]),
- 2 => simd_shuffle2!(a, b, [0b00, 0b11]),
- _ => simd_shuffle2!(a, b, [0b01, 0b11]),
+ 0 => simd_shuffle!(a, b, [0b00, 0b10]),
+ 1 => simd_shuffle!(a, b, [0b01, 0b10]),
+ 2 => simd_shuffle!(a, b, [0b00, 0b11]),
+ _ => simd_shuffle!(a, b, [0b01, 0b11]),
}
}
@@ -80,7 +80,7 @@ pub unsafe fn vec_xxpermdi<T, const DM: i32>(a: T, b: T) -> T
where
T: sealed::VectorPermDI,
{
- static_assert_imm2!(DM);
+ static_assert_uimm_bits!(DM, 2);
a.vec_xxpermdi(b, DM as u8)
}
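
The shuffle patterns in `xxpermdi` encode `dm` directly: reading the index arrays above as indices into the concatenation `[a0, a1, b0, b1]`, lane 0 of the result is `a[dm & 1]` and lane 1 is `b[dm >> 1]`. A scalar model, for illustration only:

// Scalar model of the four simd_shuffle! arms above.
fn xxpermdi_model(a: [i64; 2], b: [i64; 2], dm: u8) -> [i64; 2] {
    let dm = (dm & 0b11) as usize;
    [a[dm & 1], b[dm >> 1]]
}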
diff --git a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
index 0e35fe1f1..ed021df5a 100644
--- a/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
+++ b/library/stdarch/crates/core_arch/src/riscv_shared/mod.rs
@@ -488,7 +488,7 @@ pub unsafe fn hinval_gvma_all() {
/// Register `fcsr` is a 32-bit read/write register that selects the dynamic rounding mode
/// for floating-point arithmetic operations and holds the accrued exception flag.
///
-/// Accoding to "F" Standard Extension for Single-Precision Floating-Point, Version 2.2,
+/// According to "F" Standard Extension for Single-Precision Floating-Point, Version 2.2,
/// register `fcsr` is defined as:
///
/// | Bit index | Meaning |
@@ -521,7 +521,7 @@ pub fn fscsr(value: u32) -> u32 {
/// Reads the floating-point rounding mode register `frm`
///
-/// Accoding to "F" Standard Extension for Single-Precision Floating-Point, Version 2.2,
+/// According to "F" Standard Extension for Single-Precision Floating-Point, Version 2.2,
/// the rounding mode field is defined as listed in the table below:
///
/// | Rounding Mode | Mnemonic | Meaning |
@@ -558,8 +558,8 @@ pub fn fsrm(value: u32) -> u32 {
/// The accrued exception flags indicate the exception conditions that have arisen
/// on any floating-point arithmetic instruction since the field was last reset by software.
///
-/// Accoding to "F" Standard Extension for Single-Precision Floating-Point, Version 2.2,
-/// the accured exception flags is defined as a bit vector of 5 bits.
+/// According to "F" Standard Extension for Single-Precision Floating-Point, Version 2.2,
+/// the accrued exception flags are defined as a bit vector of 5 bits.
/// The meaning of each binary bit is listed in the table below.
///
/// | Bit index | Mnemonic | Meaning |
@@ -691,7 +691,7 @@ pub fn sm3p1(x: u32) -> u32 {
#[inline]
#[target_feature(enable = "zksed")]
pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 {
- static_assert!(BS: u8 where BS <= 3);
+ static_assert!(BS <= 3);
let ans: u32;
unsafe {
asm!("sm4ed {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) a, const BS, options(pure, nomem, nostack))
@@ -751,7 +751,7 @@ pub fn sm4ed<const BS: u8>(x: u32, a: u32) -> u32 {
#[inline]
#[target_feature(enable = "zksed")]
pub fn sm4ks<const BS: u8>(x: u32, k: u32) -> u32 {
- static_assert!(BS: u8 where BS <= 3);
+ static_assert!(BS <= 3);
let ans: u32;
unsafe {
asm!("sm4ks {}, {}, {}, {}", lateout(reg) ans, in(reg) x, in(reg) k, const BS, options(pure, nomem, nostack))
diff --git a/library/stdarch/crates/core_arch/src/wasm32/memory.rs b/library/stdarch/crates/core_arch/src/wasm32/memory.rs
index b5cf13e98..882e06815 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/memory.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/memory.rs
@@ -26,7 +26,7 @@ extern "C" {
#[stable(feature = "simd_wasm32", since = "1.33.0")]
#[doc(alias("memory.size"))]
pub fn memory_size<const MEM: u32>() -> usize {
- static_assert!(MEM: u32 where MEM == 0);
+ static_assert!(MEM == 0);
unsafe { llvm_memory_size(MEM) }
}
@@ -52,7 +52,7 @@ pub fn memory_size<const MEM: u32>() -> usize {
#[doc(alias("memory.grow"))]
pub fn memory_grow<const MEM: u32>(delta: usize) -> usize {
unsafe {
- static_assert!(MEM: u32 where MEM == 0);
+ static_assert!(MEM == 0);
llvm_memory_grow(MEM, delta)
}
}
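
The `NAME: ty where EXPR` form disappears across the tree; the simplified `static_assert!` presumably takes a plain boolean expression over the const generics, along the lines of (a sketch, not the stdarch source):

// Sketch: an inline const block may reference the enclosing function's
// const generics, so the bound no longer has to be restated in the macro.
macro_rules! static_assert {
    ($e:expr) => {
        const { assert!($e) }
    };
}

fn sm4_round<const BS: u8>() {
    static_assert!(BS <= 3); // rejected at compile time for BS >= 4
}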
diff --git a/library/stdarch/crates/core_arch/src/wasm32/mod.rs b/library/stdarch/crates/core_arch/src/wasm32/mod.rs
index 2fbe80e99..ed5fc01dd 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/mod.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/mod.rs
@@ -9,6 +9,9 @@ pub use self::atomic::*;
mod simd128;
pub use self::simd128::*;
+mod relaxed_simd;
+pub use self::relaxed_simd::*;
+
mod memory;
pub use self::memory::*;
diff --git a/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs b/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs
new file mode 100644
index 000000000..8fe935d1f
--- /dev/null
+++ b/library/stdarch/crates/core_arch/src/wasm32/relaxed_simd.rs
@@ -0,0 +1,449 @@
+use super::v128;
+use crate::core_arch::simd;
+
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+#[allow(improper_ctypes)]
+extern "C" {
+ #[link_name = "llvm.wasm.relaxed.swizzle"]
+ fn llvm_relaxed_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
+ #[link_name = "llvm.wasm.relaxed.trunc.signed"]
+ fn llvm_relaxed_trunc_signed(a: simd::f32x4) -> simd::i32x4;
+ #[link_name = "llvm.wasm.relaxed.trunc.unsigned"]
+ fn llvm_relaxed_trunc_unsigned(a: simd::f32x4) -> simd::i32x4;
+ #[link_name = "llvm.wasm.relaxed.trunc.signed.zero"]
+ fn llvm_relaxed_trunc_signed_zero(a: simd::f64x2) -> simd::i32x4;
+ #[link_name = "llvm.wasm.relaxed.trunc.unsigned.zero"]
+ fn llvm_relaxed_trunc_unsigned_zero(a: simd::f64x2) -> simd::i32x4;
+
+ #[link_name = "llvm.wasm.fma.v4f32"]
+ fn llvm_f32x4_fma(a: simd::f32x4, b: simd::f32x4, c: simd::f32x4) -> simd::f32x4;
+ #[link_name = "llvm.wasm.fms.v4f32"]
+ fn llvm_f32x4_fms(a: simd::f32x4, b: simd::f32x4, c: simd::f32x4) -> simd::f32x4;
+ #[link_name = "llvm.wasm.fma.v2f64"]
+ fn llvm_f64x2_fma(a: simd::f64x2, b: simd::f64x2, c: simd::f64x2) -> simd::f64x2;
+ #[link_name = "llvm.wasm.fms.v2f64"]
+ fn llvm_f64x2_fms(a: simd::f64x2, b: simd::f64x2, c: simd::f64x2) -> simd::f64x2;
+
+ #[link_name = "llvm.wasm.laneselect.v16i8"]
+ fn llvm_i8x16_laneselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
+ #[link_name = "llvm.wasm.laneselect.v8i16"]
+ fn llvm_i16x8_laneselect(a: simd::i16x8, b: simd::i16x8, c: simd::i16x8) -> simd::i16x8;
+ #[link_name = "llvm.wasm.laneselect.v4i32"]
+ fn llvm_i32x4_laneselect(a: simd::i32x4, b: simd::i32x4, c: simd::i32x4) -> simd::i32x4;
+ #[link_name = "llvm.wasm.laneselect.v2i64"]
+ fn llvm_i64x2_laneselect(a: simd::i64x2, b: simd::i64x2, c: simd::i64x2) -> simd::i64x2;
+
+ #[link_name = "llvm.wasm.relaxed.min.v4f32"]
+ fn llvm_f32x4_relaxed_min(a: simd::f32x4, b: simd::f32x4) -> simd::f32x4;
+ #[link_name = "llvm.wasm.relaxed.min.v2f64"]
+ fn llvm_f64x2_relaxed_min(a: simd::f64x2, b: simd::f64x2) -> simd::f64x2;
+ #[link_name = "llvm.wasm.relaxed.max.v4f32"]
+ fn llvm_f32x4_relaxed_max(a: simd::f32x4, b: simd::f32x4) -> simd::f32x4;
+ #[link_name = "llvm.wasm.relaxed.max.v2f64"]
+ fn llvm_f64x2_relaxed_max(a: simd::f64x2, b: simd::f64x2) -> simd::f64x2;
+
+ #[link_name = "llvm.wasm.relaxed.q15mulr.signed"]
+ fn llvm_relaxed_q15mulr_signed(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
+ #[link_name = "llvm.wasm.dot.i8x16.i7x16.signed"]
+ fn llvm_i16x8_relaxed_dot_i8x16_i7x16_s(a: simd::i8x16, b: simd::i8x16) -> simd::i16x8;
+ #[link_name = "llvm.wasm.dot.i8x16.i7x16.add.signed"]
+ fn llvm_i32x4_relaxed_dot_i8x16_i7x16_add_s(
+ a: simd::i8x16,
+ b: simd::i8x16,
+ c: simd::i32x4,
+ ) -> simd::i32x4;
+}
+
+/// A relaxed version of `i8x16_swizzle(a, s)` which selects lanes from `a`
+/// using indices in `s`.
+///
+/// An index in the range `[0, 15]` selects the element of `a` at that index.
+/// If the high bit of any element of `s` is set (meaning 128 or greater) then
+/// the corresponding output lane is guaranteed to be zero. Otherwise if the
+/// element of `s` is within the range `[16,128)` then the output lane is either
+/// 0 or `a[s[i] % 16]` depending on the implementation.
+#[inline]
+#[cfg_attr(test, assert_instr(i8x16.relaxed_swizzle))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i8x16.relaxed_swizzle"))]
+pub fn i8x16_relaxed_swizzle(a: v128, s: v128) -> v128 {
+ unsafe { llvm_relaxed_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
+}
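
A scalar reference for the latitude described above (sketch; `pick_modulo` stands in for the implementation's choice):

// One output lane of relaxed swizzle: high-bit-set indices must yield 0,
// indices in 16..=127 may yield either 0 or a[s % 16].
fn relaxed_swizzle_lane(a: &[i8; 16], s: u8, pick_modulo: bool) -> i8 {
    match s {
        0..=15 => a[s as usize],
        16..=127 if pick_modulo => a[(s % 16) as usize],
        _ => 0,
    }
}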
+
+/// A relaxed version of `i32x4_trunc_sat_f32x4(a)` converts the `f32` lanes
+/// of `a` to signed 32-bit integers.
+///
+/// Values which don't fit in 32-bit integers or are NaN may have the same
+/// result as `i32x4_trunc_sat_f32x4` or may return `i32::MIN`.
+#[inline]
+#[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f32x4_s))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i32x4.relaxed_trunc_f32x4_s"))]
+pub fn i32x4_relaxed_trunc_f32x4(a: v128) -> v128 {
+ unsafe { llvm_relaxed_trunc_signed(a.as_f32x4()).v128() }
+}
+
+/// A relaxed version of `u32x4_trunc_sat_f32x4(a)` converts the `f32` lanes
+/// of `a` to unsigned 32-bit integers.
+///
+/// Values which don't fit in 32-bit unsigned integers or are NaN may have the
+/// same result as `u32x4_trunc_sat_f32x4` or may return `u32::MAX`.
+#[inline]
+#[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f32x4_u))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i32x4.relaxed_trunc_f32x4_u"))]
+pub fn u32x4_relaxed_trunc_f32x4(a: v128) -> v128 {
+ unsafe { llvm_relaxed_trunc_unsigned(a.as_f32x4()).v128() }
+}
+
+/// A relaxed version of `i32x4_trunc_sat_f64x2_zero(a)` converts the `f64`
+/// lanes of `a` to signed 32-bit integers and the upper two lanes are zero.
+///
+/// Values which don't fit in 32-bit integers or are NaN may have the same
+/// result as `i32x4_trunc_sat_f64x2_zero` or may return `i32::MIN`.
+#[inline]
+#[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f64x2_s_zero))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i32x4.relaxed_trunc_f64x2_s_zero"))]
+pub fn i32x4_relaxed_trunc_f64x2_zero(a: v128) -> v128 {
+ unsafe { llvm_relaxed_trunc_signed_zero(a.as_f64x2()).v128() }
+}
+
+/// A relaxed version of `u32x4_trunc_sat_f64x2_zero(a)` converts the `f64`
+/// lanes of `a` to unsigned 32-bit integers and the upper two lanes are zero.
+///
+/// Values which don't fit in 32-bit unsigned integers or are NaN may have the
+/// same result as `u32x4_trunc_sat_f64x2_zero` or may return `u32::MAX`.
+#[inline]
+#[cfg_attr(test, assert_instr(i32x4.relaxed_trunc_f64x2_u_zero))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i32x4.relaxed_trunc_f64x2_u_zero"))]
+pub fn u32x4_relaxed_trunc_f64x2_zero(a: v128) -> v128 {
+ unsafe { llvm_relaxed_trunc_unsigned_zero(a.as_f64x2()).v128() }
+}
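
A scalar sketch of the two results a lane of the relaxed truncations may take; a Rust `as` cast already implements the saturating behavior (including NaN -> 0), while a relaxed lowering may return `i32::MIN` for any NaN or out-of-range input:

// Both acceptable outcomes for one f32 lane (illustration only).
fn relaxed_trunc_lane_outcomes(x: f32) -> [i32; 2] {
    let saturated = x as i32; // i32x4_trunc_sat_f32x4 behavior
    let relaxed = if x.is_nan() || x >= 2_147_483_648.0 || x < -2_147_483_648.0 {
        i32::MIN
    } else {
        saturated
    };
    [saturated, relaxed]
}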
+
+/// Computes `a * b + c` with either one rounding or two roundings.
+#[inline]
+#[cfg_attr(test, assert_instr(f32x4.relaxed_madd))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f32x4.relaxed_madd"))]
+pub fn f32x4_relaxed_madd(a: v128, b: v128, c: v128) -> v128 {
+ unsafe { llvm_f32x4_fma(a.as_f32x4(), b.as_f32x4(), c.as_f32x4()).v128() }
+}
+
+/// Computes `-a * b + c` with either one rounding or two roundings.
+#[inline]
+#[cfg_attr(test, assert_instr(f32x4.relaxed_nmadd))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f32x4.relaxed_nmadd"))]
+pub fn f32x4_relaxed_nmadd(a: v128, b: v128, c: v128) -> v128 {
+ unsafe { llvm_f32x4_fms(a.as_f32x4(), b.as_f32x4(), c.as_f32x4()).v128() }
+}
+
+/// Computes `a * b + c` with either one rounding or two roundings.
+#[inline]
+#[cfg_attr(test, assert_instr(f64x2.relaxed_madd))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f64x2.relaxed_madd"))]
+pub fn f64x2_relaxed_madd(a: v128, b: v128, c: v128) -> v128 {
+ unsafe { llvm_f64x2_fma(a.as_f64x2(), b.as_f64x2(), c.as_f64x2()).v128() }
+}
+
+/// Computes `-a * b + c` with either one rounding or two roundings.
+#[inline]
+#[cfg_attr(test, assert_instr(f64x2.relaxed_nmadd))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f64x2.relaxed_nmadd"))]
+pub fn f64x2_relaxed_nmadd(a: v128, b: v128, c: v128) -> v128 {
+ unsafe { llvm_f64x2_fms(a.as_f64x2(), b.as_f64x2(), c.as_f64x2()).v128() }
+}
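
The "one rounding or two roundings" wording means an implementation may or may not fuse the multiply-add. A scalar sketch of the two permitted results per lane:

fn madd_outcomes(a: f32, b: f32, c: f32) -> [f32; 2] {
    [
        a * b + c,       // two roundings: separate multiply, then add
        a.mul_add(b, c), // one rounding: fused multiply-add
    ]
}

// Example where they differ: with a = 1.0 + EPSILON, b = 1.0 - EPSILON and
// c = -1.0, the unfused form rounds a * b to exactly 1.0 and returns 0.0,
// while the fused form keeps the -EPSILON * EPSILON term and returns a
// tiny negative number.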
+
+/// A relaxed version of `v128_bitselect` which either behaves identically to
+/// `v128_bitselect` or inspects only the high bit of each lane of `m`, choosing
+/// the corresponding lane of `a` if that bit is 1 and the lane of `b` if it
+/// is 0.
+///
+/// If the `m` mask's lanes are either all-one or all-zero then this instruction
+/// is the same as `v128_bitselect`.
+#[inline]
+#[cfg_attr(test, assert_instr(i8x16.relaxed_laneselect))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i8x16.relaxed_laneselect"))]
+pub fn i8x16_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 {
+ unsafe { llvm_i8x16_laneselect(a.as_i8x16(), b.as_i8x16(), m.as_i8x16()).v128() }
+}
+
+/// A relaxed version of `v128_bitselect` which either behaves identically to
+/// `v128_bitselect` or inspects only the high bit of each lane of `m`, choosing
+/// the corresponding lane of `a` if that bit is 1 and the lane of `b` if it
+/// is 0.
+///
+/// If the `m` mask's lanes are either all-one or all-zero then this instruction
+/// is the same as `v128_bitselect`.
+#[inline]
+#[cfg_attr(test, assert_instr(i16x8.relaxed_laneselect))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i16x8.relaxed_laneselect"))]
+pub fn i16x8_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 {
+ unsafe { llvm_i16x8_laneselect(a.as_i16x8(), b.as_i16x8(), m.as_i16x8()).v128() }
+}
+
+/// A relaxed version of `v128_bitselect` which either behaves identically to
+/// `v128_bitselect` or inspects only the high bit of each lane of `m`, choosing
+/// the corresponding lane of `a` if that bit is 1 and the lane of `b` if it
+/// is 0.
+///
+/// If the `m` mask's lanes are either all-one or all-zero then this instruction
+/// is the same as `v128_bitselect`.
+#[inline]
+#[cfg_attr(test, assert_instr(i32x4.relaxed_laneselect))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i32x4.relaxed_laneselect"))]
+pub fn i32x4_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 {
+ unsafe { llvm_i32x4_laneselect(a.as_i32x4(), b.as_i32x4(), m.as_i32x4()).v128() }
+}
+
+/// A relaxed version of `v128_bitselect` which either behaves identically to
+/// `v128_bitselect` or inspects only the high bit of each lane of `m`, choosing
+/// the corresponding lane of `a` if that bit is 1 and the lane of `b` if it
+/// is 0.
+///
+/// If the `m` mask's lanes are either all-one or all-zero then this instruction
+/// is the same as `v128_bitselect`.
+#[inline]
+#[cfg_attr(test, assert_instr(i64x2.relaxed_laneselect))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i64x2.relaxed_laneselect"))]
+pub fn i64x2_relaxed_laneselect(a: v128, b: v128, m: v128) -> v128 {
+ unsafe { llvm_i64x2_laneselect(a.as_i64x2(), b.as_i64x2(), m.as_i64x2()).v128() }
+}
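
A scalar model of the two permitted behaviors for one lane of the laneselects (sketch; `bitwise` stands in for the implementation's choice):

fn laneselect_lane(a: i8, b: i8, m: i8, bitwise: bool) -> i8 {
    if bitwise {
        (a & m) | (b & !m) // exact v128_bitselect behavior
    } else if m < 0 {
        a // sign (high) bit of the mask lane is set
    } else {
        b
    }
}
// The two branches agree whenever each mask lane is all-ones or all-zeros.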
+
+/// A relaxed version of `f32x4_min` which is either `f32x4_min` or
+/// `f32x4_pmin`.
+#[inline]
+#[cfg_attr(test, assert_instr(f32x4.relaxed_min))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f32x4.relaxed_min"))]
+pub fn f32x4_relaxed_min(a: v128, b: v128) -> v128 {
+ unsafe { llvm_f32x4_relaxed_min(a.as_f32x4(), b.as_f32x4()).v128() }
+}
+
+/// A relaxed version of `f32x4_max` which is either `f32x4_max` or
+/// `f32x4_pmax`.
+#[inline]
+#[cfg_attr(test, assert_instr(f32x4.relaxed_max))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f32x4.relaxed_max"))]
+pub fn f32x4_relaxed_max(a: v128, b: v128) -> v128 {
+ unsafe { llvm_f32x4_relaxed_max(a.as_f32x4(), b.as_f32x4()).v128() }
+}
+
+/// A relaxed version of `f64x2_min` which is either `f64x2_min` or
+/// `f64x2_pmin`.
+#[inline]
+#[cfg_attr(test, assert_instr(f64x2.relaxed_min))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f64x2.relaxed_min"))]
+pub fn f64x2_relaxed_min(a: v128, b: v128) -> v128 {
+ unsafe { llvm_f64x2_relaxed_min(a.as_f64x2(), b.as_f64x2()).v128() }
+}
+
+/// A relaxed version of `f64x2_max` which is either `f64x2_max` or
+/// `f64x2_pmax`.
+#[inline]
+#[cfg_attr(test, assert_instr(f64x2.relaxed_max))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("f64x2.relaxed_max"))]
+pub fn f64x2_relaxed_max(a: v128, b: v128) -> v128 {
+ unsafe { llvm_f64x2_relaxed_max(a.as_f64x2(), b.as_f64x2()).v128() }
+}
+
+/// A relaxed version of `i16x8_q15mulr_sat` where if both lanes are
+/// `i16::MIN` then the result is either `i16::MIN` or `i16::MAX`.
+#[inline]
+#[cfg_attr(test, assert_instr(i16x8.relaxed_q15mulr_s))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i16x8.relaxed_q15mulr_s"))]
+pub fn i16x8_relaxed_q15mulr(a: v128, b: v128) -> v128 {
+ unsafe { llvm_relaxed_q15mulr_signed(a.as_i16x8(), b.as_i16x8()).v128() }
+}
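
Q15 rounding multiplication computes `(a * b + 0x4000) >> 15` in a wider type; the only lane where saturation matters is `a = b = i16::MIN`, whose mathematical result `0x8000` does not fit in `i16`. A scalar sketch:

fn q15mulr_lane(a: i16, b: i16, saturating: bool) -> i16 {
    let r = (a as i32 * b as i32 + 0x4000) >> 15;
    if r == 0x8000 {
        // Only reachable for a = b = i16::MIN.
        if saturating { i16::MAX } else { i16::MIN }
    } else {
        r as i16
    }
}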
+
+/// A relaxed dot-product instruction.
+///
+/// This instruction will perform pairwise products of the 8-bit values in `a`
+/// and `b` and then accumulate adjacent pairs into 16-bit results producing a
+/// final `i16x8` vector. The bytes of `a` are always interpreted as signed and
+/// the bytes in `b` may be interpreted as signed or unsigned. If the top bit
+/// of a byte in `b` isn't set then its value is the same under either
+/// interpretation.
+///
+/// The accumulation into 16-bit values may saturate on some platforms, while
+/// on other platforms it may wrap around on overflow.
+#[inline]
+#[cfg_attr(test, assert_instr(i16x8.relaxed_dot_i8x16_i7x16_s))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i16x8.relaxed_dot_i8x16_i7x16_s"))]
+pub fn i16x8_relaxed_dot_i8x16_i7x16(a: v128, b: v128) -> v128 {
+ unsafe { llvm_i16x8_relaxed_dot_i8x16_i7x16_s(a.as_i8x16(), b.as_i8x16()).v128() }
+}
+
+/// Similar to [`i16x8_relaxed_dot_i8x16_i7x16`] except that the intermediate
+/// `i16x8` result is fed into `i32x4_extadd_pairwise_i16x8` followed by
+/// `i32x4_add` to add the value `c` to the result.
+#[inline]
+#[cfg_attr(test, assert_instr(i32x4.relaxed_dot_i8x16_i7x16_add_s))]
+#[target_feature(enable = "relaxed-simd")]
+#[doc(alias("i32x4.relaxed_dot_i8x16_i7x16_add_s"))]
+pub fn i32x4_relaxed_dot_i8x16_i7x16_add(a: v128, b: v128, c: v128) -> v128 {
+ unsafe {
+ llvm_i32x4_relaxed_dot_i8x16_i7x16_add_s(a.as_i8x16(), b.as_i8x16(), c.as_i32x4()).v128()
+ }
+}
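
A scalar sketch of one `i16` lane of the dot product, with the two permitted readings of `b` and the platform-dependent choice between saturating and wrapping accumulation made explicit:

fn dot_lane(a0: i8, a1: i8, b0: i8, b1: i8, b_signed: bool, saturate: bool) -> i16 {
    // `a` is always signed; `b` may be read as signed or unsigned, which
    // only matters for bytes with the top bit set.
    let to_b = |x: i8| if b_signed { x as i32 } else { (x as u8) as i32 };
    let sum = a0 as i32 * to_b(b0) + a1 as i32 * to_b(b1);
    if saturate {
        sum.clamp(i16::MIN as i32, i16::MAX as i32) as i16
    } else {
        sum as i16 // wrap on overflow
    }
}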
+
+#[cfg(test)]
+pub mod tests {
+ use super::super::simd128::*;
+ use super::*;
+ use core::ops::{Add, Div, Mul, Neg, Sub};
+ use std;
+ use std::fmt::Debug;
+ use std::mem::transmute;
+ use std::num::Wrapping;
+ use std::prelude::v1::*;
+
+ fn compare_bytes(a: v128, b: &[v128]) {
+ let a: [u8; 16] = unsafe { transmute(a) };
+ if b.iter().any(|b| {
+ let b: [u8; 16] = unsafe { transmute(*b) };
+ a == b
+ }) {
+ return;
+ }
+ eprintln!("input vector {a:?}");
+ eprintln!("did not match any output:");
+ for b in b {
+ eprintln!(" {b:?}");
+ }
+ panic!("relaxed intrinsic produced a result outside the accepted set");
+ }
+
+ #[test]
+ fn test_relaxed_swizzle() {
+ compare_bytes(
+ i8x16_relaxed_swizzle(
+ i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ i8x16(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1),
+ ),
+ &[i8x16(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1)],
+ );
+ compare_bytes(
+ i8x16_relaxed_swizzle(
+ i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ u8x16(0x80, 0xff, 16, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ ),
+ &[
+ i8x16(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ i8x16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ ],
+ );
+ }
+
+ #[test]
+ fn test_relaxed_trunc() {
+ compare_bytes(
+ i32x4_relaxed_trunc_f32x4(f32x4(1.0, 2.0, -1., -4.)),
+ &[i32x4(1, 2, -1, -4)],
+ );
+ compare_bytes(
+ i32x4_relaxed_trunc_f32x4(f32x4(f32::NEG_INFINITY, f32::NAN, -0.0, f32::INFINITY)),
+ &[
+ i32x4(i32::MIN, 0, 0, i32::MAX),
+ i32x4(i32::MIN, i32::MIN, 0, i32::MIN),
+ ],
+ );
+ compare_bytes(
+ i32x4_relaxed_trunc_f64x2_zero(f64x2(1.0, -3.0)),
+ &[i32x4(1, -3, 0, 0)],
+ );
+ compare_bytes(
+ i32x4_relaxed_trunc_f64x2_zero(f64x2(f64::INFINITY, f64::NAN)),
+ &[i32x4(i32::MAX, 0, 0, 0), i32x4(i32::MIN, i32::MIN, 0, 0)],
+ );
+
+ compare_bytes(
+ u32x4_relaxed_trunc_f32x4(f32x4(1.0, 2.0, 5., 100.)),
+ &[i32x4(1, 2, 5, 100)],
+ );
+ compare_bytes(
+ u32x4_relaxed_trunc_f32x4(f32x4(f32::NEG_INFINITY, f32::NAN, -0.0, f32::INFINITY)),
+ &[
+ u32x4(u32::MAX, 0, 0, u32::MAX),
+ u32x4(u32::MAX, u32::MAX, 0, u32::MAX),
+ ],
+ );
+ compare_bytes(
+ u32x4_relaxed_trunc_f64x2_zero(f64x2(1.0, 3.0)),
+ &[u32x4(1, 3, 0, 0)],
+ );
+ compare_bytes(
+ u32x4_relaxed_trunc_f64x2_zero(f64x2(f64::INFINITY, f64::NAN)),
+ &[i32x4(i32::MAX, 0, 0, 0), i32x4(i32::MIN, i32::MIN, 0, 0)],
+ );
+ }
+
+ #[test]
+ fn test_madd() {
+ let floats = [
+ f32::NAN,
+ f32::NEG_INFINITY,
+ f32::INFINITY,
+ 1.0,
+ 2.0,
+ -1.0,
+ 0.0,
+ 100.3,
+ 7.8,
+ 9.4,
+ ];
+ for &a in floats.iter() {
+ for &b in floats.iter() {
+ for &c in floats.iter() {
+ let f1 = a * b + c;
+ let f2 = a.mul_add(b, c);
+ compare_bytes(
+ f32x4_relaxed_madd(f32x4(a, a, a, a), f32x4(b, b, b, b), f32x4(c, c, c, c)),
+ &[f32x4(f1, f1, f1, f1), f32x4(f2, f2, f2, f2)],
+ );
+
+ let f1 = -a * b + c;
+ let f2 = (-a).mul_add(b, c);
+ compare_bytes(
+ f32x4_relaxed_nmadd(
+ f32x4(a, a, a, a),
+ f32x4(b, b, b, b),
+ f32x4(c, c, c, c),
+ ),
+ &[f32x4(f1, f1, f1, f1), f32x4(f2, f2, f2, f2)],
+ );
+
+ let a = f64::from(a);
+ let b = f64::from(b);
+ let c = f64::from(c);
+ let f1 = a * b + c;
+ let f2 = a.mul_add(b, c);
+ compare_bytes(
+ f64x2_relaxed_madd(f64x2(a, a), f64x2(b, b), f64x2(c, c)),
+ &[f64x2(f1, f1), f64x2(f2, f2)],
+ );
+ let f1 = -a * b + c;
+ let f2 = (-a).mul_add(b, c);
+ compare_bytes(
+ f64x2_relaxed_nmadd(f64x2(a, a), f64x2(b, b), f64x2(c, c)),
+ &[f64x2(f1, f1), f64x2(f2, f2)],
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
index c0025696b..5220fa74f 100644
--- a/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
+++ b/library/stdarch/crates/core_arch/src/wasm32/simd128.rs
@@ -46,7 +46,7 @@ macro_rules! conversions {
impl v128 {
$(
#[inline(always)]
- fn $name(self) -> $ty {
+ pub(crate) fn $name(self) -> $ty {
unsafe { mem::transmute(self) }
}
)*
@@ -55,7 +55,7 @@ macro_rules! conversions {
impl $ty {
#[inline(always)]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
- const fn v128(self) -> v128 {
+ pub(crate) const fn v128(self) -> v128 {
unsafe { mem::transmute(self) }
}
}
@@ -937,47 +937,30 @@ pub fn i8x16_shuffle<
a: v128,
b: v128,
) -> v128 {
- static_assert!(I0: usize where I0 < 32);
- static_assert!(I1: usize where I1 < 32);
- static_assert!(I2: usize where I2 < 32);
- static_assert!(I3: usize where I3 < 32);
- static_assert!(I4: usize where I4 < 32);
- static_assert!(I5: usize where I5 < 32);
- static_assert!(I6: usize where I6 < 32);
- static_assert!(I7: usize where I7 < 32);
- static_assert!(I8: usize where I8 < 32);
- static_assert!(I9: usize where I9 < 32);
- static_assert!(I10: usize where I10 < 32);
- static_assert!(I11: usize where I11 < 32);
- static_assert!(I12: usize where I12 < 32);
- static_assert!(I13: usize where I13 < 32);
- static_assert!(I14: usize where I14 < 32);
- static_assert!(I15: usize where I15 < 32);
+ static_assert!(I0 < 32);
+ static_assert!(I1 < 32);
+ static_assert!(I2 < 32);
+ static_assert!(I3 < 32);
+ static_assert!(I4 < 32);
+ static_assert!(I5 < 32);
+ static_assert!(I6 < 32);
+ static_assert!(I7 < 32);
+ static_assert!(I8 < 32);
+ static_assert!(I9 < 32);
+ static_assert!(I10 < 32);
+ static_assert!(I11 < 32);
+ static_assert!(I12 < 32);
+ static_assert!(I13 < 32);
+ static_assert!(I14 < 32);
+ static_assert!(I15 < 32);
let shuf: simd::u8x16 = unsafe {
- simd_shuffle16!(
+ simd_shuffle!(
a.as_u8x16(),
b.as_u8x16(),
- <
- const I0: usize,
- const I1: usize,
- const I2: usize,
- const I3: usize,
- const I4: usize,
- const I5: usize,
- const I6: usize,
- const I7: usize,
- const I8: usize,
- const I9: usize,
- const I10: usize,
- const I11: usize,
- const I12: usize,
- const I13: usize,
- const I14: usize,
- const I15: usize,
- > [
- I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32,
- I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32, I14 as u32,
- I15 as u32,
+ [
+ I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
+ I7 as u32, I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32,
+ I14 as u32, I15 as u32,
],
)
};
@@ -1024,29 +1007,21 @@ pub fn i16x8_shuffle<
a: v128,
b: v128,
) -> v128 {
- static_assert!(I0: usize where I0 < 16);
- static_assert!(I1: usize where I1 < 16);
- static_assert!(I2: usize where I2 < 16);
- static_assert!(I3: usize where I3 < 16);
- static_assert!(I4: usize where I4 < 16);
- static_assert!(I5: usize where I5 < 16);
- static_assert!(I6: usize where I6 < 16);
- static_assert!(I7: usize where I7 < 16);
+ static_assert!(I0 < 16);
+ static_assert!(I1 < 16);
+ static_assert!(I2 < 16);
+ static_assert!(I3 < 16);
+ static_assert!(I4 < 16);
+ static_assert!(I5 < 16);
+ static_assert!(I6 < 16);
+ static_assert!(I7 < 16);
let shuf: simd::u16x8 = unsafe {
- simd_shuffle8!(
+ simd_shuffle!(
a.as_u16x8(),
b.as_u16x8(),
- <
- const I0: usize,
- const I1: usize,
- const I2: usize,
- const I3: usize,
- const I4: usize,
- const I5: usize,
- const I6: usize,
- const I7: usize,
- > [
- I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32,
+ [
+ I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
+ I7 as u32,
],
)
};
@@ -1072,15 +1047,15 @@ pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3
a: v128,
b: v128,
) -> v128 {
- static_assert!(I0: usize where I0 < 8);
- static_assert!(I1: usize where I1 < 8);
- static_assert!(I2: usize where I2 < 8);
- static_assert!(I3: usize where I3 < 8);
+ static_assert!(I0 < 8);
+ static_assert!(I1 < 8);
+ static_assert!(I2 < 8);
+ static_assert!(I3 < 8);
let shuf: simd::u32x4 = unsafe {
- simd_shuffle4!(
+ simd_shuffle!(
a.as_u32x4(),
b.as_u32x4(),
- <const I0: usize, const I1: usize, const I2: usize, const I3: usize> [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
+ [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
)
};
shuf.v128()
@@ -1102,15 +1077,10 @@ pub use i32x4_shuffle as u32x4_shuffle;
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
- static_assert!(I0: usize where I0 < 4);
- static_assert!(I1: usize where I1 < 4);
- let shuf: simd::u64x2 = unsafe {
- simd_shuffle2!(
- a.as_u64x2(),
- b.as_u64x2(),
- <const I0: usize, const I1: usize> [I0 as u32, I1 as u32],
- )
- };
+ static_assert!(I0 < 4);
+ static_assert!(I1 < 4);
+ let shuf: simd::u64x2 =
+ unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) };
shuf.v128()
}
@@ -1127,7 +1097,7 @@ pub use i64x2_shuffle as u64x2_shuffle;
#[doc(alias("i8x16.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
- static_assert!(N: usize where N < 16);
+ static_assert!(N < 16);
unsafe { simd_extract(a.as_i8x16(), N as u32) }
}
@@ -1141,7 +1111,7 @@ pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
#[doc(alias("i8x16.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
- static_assert!(N: usize where N < 16);
+ static_assert!(N < 16);
unsafe { simd_extract(a.as_u8x16(), N as u32) }
}
@@ -1155,7 +1125,7 @@ pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
- static_assert!(N: usize where N < 16);
+ static_assert!(N < 16);
unsafe { simd_insert(a.as_i8x16(), N as u32, val).v128() }
}
@@ -1169,7 +1139,7 @@ pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
- static_assert!(N: usize where N < 16);
+ static_assert!(N < 16);
unsafe { simd_insert(a.as_u8x16(), N as u32, val).v128() }
}
@@ -1183,7 +1153,7 @@ pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
#[doc(alias("i16x8.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
- static_assert!(N: usize where N < 8);
+ static_assert!(N < 8);
unsafe { simd_extract(a.as_i16x8(), N as u32) }
}
@@ -1197,7 +1167,7 @@ pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
#[doc(alias("i16x8.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
- static_assert!(N: usize where N < 8);
+ static_assert!(N < 8);
unsafe { simd_extract(a.as_u16x8(), N as u32) }
}
@@ -1211,7 +1181,7 @@ pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
- static_assert!(N: usize where N < 8);
+ static_assert!(N < 8);
unsafe { simd_insert(a.as_i16x8(), N as u32, val).v128() }
}
@@ -1225,7 +1195,7 @@ pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
- static_assert!(N: usize where N < 8);
+ static_assert!(N < 8);
unsafe { simd_insert(a.as_u16x8(), N as u32, val).v128() }
}
@@ -1239,7 +1209,7 @@ pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
- static_assert!(N: usize where N < 4);
+ static_assert!(N < 4);
unsafe { simd_extract(a.as_i32x4(), N as u32) }
}
@@ -1265,7 +1235,7 @@ pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
- static_assert!(N: usize where N < 4);
+ static_assert!(N < 4);
unsafe { simd_insert(a.as_i32x4(), N as u32, val).v128() }
}
@@ -1291,7 +1261,7 @@ pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
- static_assert!(N: usize where N < 2);
+ static_assert!(N < 2);
unsafe { simd_extract(a.as_i64x2(), N as u32) }
}
@@ -1317,7 +1287,7 @@ pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
- static_assert!(N: usize where N < 2);
+ static_assert!(N < 2);
unsafe { simd_insert(a.as_i64x2(), N as u32, val).v128() }
}
@@ -1343,7 +1313,7 @@ pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
#[doc(alias("f32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
- static_assert!(N: usize where N < 4);
+ static_assert!(N < 4);
unsafe { simd_extract(a.as_f32x4(), N as u32) }
}
@@ -1357,7 +1327,7 @@ pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
#[doc(alias("f32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
- static_assert!(N: usize where N < 4);
+ static_assert!(N < 4);
unsafe { simd_insert(a.as_f32x4(), N as u32, val).v128() }
}
@@ -1371,7 +1341,7 @@ pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
#[doc(alias("f64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
- static_assert!(N: usize where N < 2);
+ static_assert!(N < 2);
unsafe { simd_extract(a.as_f64x2(), N as u32) }
}
@@ -1385,7 +1355,7 @@ pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
#[doc(alias("f64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
- static_assert!(N: usize where N < 2);
+ static_assert!(N < 2);
unsafe { simd_insert(a.as_f64x2(), N as u32, val).v128() }
}
@@ -2349,10 +2319,6 @@ pub use i8x16_all_true as u8x16_all_true;
#[doc(alias("i8x16.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_bitmask(a: v128) -> u16 {
- // FIXME(https://bugs.llvm.org/show_bug.cgi?id=50507) - this produces an
- // extraneous `i32.and` instruction against a mask of 65535 when converting
- // from the native intrinsic's i32 return value to our desired u16. This
- // shouldn't be necessary, though, but requires upstream LLVM changes.
unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
}
@@ -2686,7 +2652,7 @@ pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
+ simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
a.as_i8x16(),
a.as_i8x16(),
[0, 1, 2, 3, 4, 5, 6, 7],
@@ -2704,7 +2670,7 @@ pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
+ simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
a.as_i8x16(),
a.as_i8x16(),
[8, 9, 10, 11, 12, 13, 14, 15],
@@ -2722,7 +2688,7 @@ pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
+ simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
a.as_u8x16(),
a.as_u8x16(),
[0, 1, 2, 3, 4, 5, 6, 7],
@@ -2743,7 +2709,7 @@ pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
+ simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
a.as_u8x16(),
a.as_u8x16(),
[8, 9, 10, 11, 12, 13, 14, 15],
@@ -2956,12 +2922,12 @@ pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
+ let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
a.as_i8x16(),
a.as_i8x16(),
[0, 1, 2, 3, 4, 5, 6, 7],
));
- let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
+ let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
b.as_i8x16(),
b.as_i8x16(),
[0, 1, 2, 3, 4, 5, 6, 7],
@@ -2981,12 +2947,12 @@ pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
+ let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
a.as_i8x16(),
a.as_i8x16(),
[8, 9, 10, 11, 12, 13, 14, 15],
));
- let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
+ let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
b.as_i8x16(),
b.as_i8x16(),
[8, 9, 10, 11, 12, 13, 14, 15],
@@ -3006,12 +2972,12 @@ pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
+ let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
a.as_u8x16(),
a.as_u8x16(),
[0, 1, 2, 3, 4, 5, 6, 7],
));
- let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
+ let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
b.as_u8x16(),
b.as_u8x16(),
[0, 1, 2, 3, 4, 5, 6, 7],
@@ -3034,12 +3000,12 @@ pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
+ let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
a.as_u8x16(),
a.as_u8x16(),
[8, 9, 10, 11, 12, 13, 14, 15],
));
- let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
+ let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
b.as_u8x16(),
b.as_u8x16(),
[8, 9, 10, 11, 12, 13, 14, 15],
@@ -3136,7 +3102,7 @@ pub use i32x4_bitmask as u32x4_bitmask;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
+ simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
a.as_i16x8(),
a.as_i16x8(),
[0, 1, 2, 3]
@@ -3154,7 +3120,7 @@ pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
+ simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
a.as_i16x8(),
a.as_i16x8(),
[4, 5, 6, 7]
@@ -3172,7 +3138,7 @@ pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
+ simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
a.as_u16x8(),
a.as_u16x8(),
[0, 1, 2, 3]
@@ -3193,7 +3159,7 @@ pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
+ simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
a.as_u16x8(),
a.as_u16x8(),
[4, 5, 6, 7]
@@ -3363,12 +3329,12 @@ pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
+ let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
a.as_i16x8(),
a.as_i16x8(),
[0, 1, 2, 3]
));
- let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
+ let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
b.as_i16x8(),
b.as_i16x8(),
[0, 1, 2, 3]
@@ -3388,12 +3354,12 @@ pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
+ let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
a.as_i16x8(),
a.as_i16x8(),
[4, 5, 6, 7]
));
- let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
+ let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
b.as_i16x8(),
b.as_i16x8(),
[4, 5, 6, 7]
@@ -3413,12 +3379,12 @@ pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
+ let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
a.as_u16x8(),
a.as_u16x8(),
[0, 1, 2, 3]
));
- let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
+ let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
b.as_u16x8(),
b.as_u16x8(),
[0, 1, 2, 3]
@@ -3441,12 +3407,12 @@ pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
+ let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
a.as_u16x8(),
a.as_u16x8(),
[4, 5, 6, 7]
));
- let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
+ let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
b.as_u16x8(),
b.as_u16x8(),
[4, 5, 6, 7]
@@ -3460,7 +3426,7 @@ pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
/// Lane-wise wrapping absolute value.
#[inline]
-// #[cfg_attr(test, assert_instr(i64x2.abs))] // FIXME llvm
+#[cfg_attr(test, assert_instr(i64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -3518,7 +3484,7 @@ pub use i64x2_bitmask as u64x2_bitmask;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
+ simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
.v128()
}
}
@@ -3532,7 +3498,7 @@ pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
+ simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
.v128()
}
}
@@ -3546,7 +3512,7 @@ pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
+ simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
.v128()
}
}
@@ -3563,7 +3529,7 @@ pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
+ simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
.v128()
}
}
@@ -3665,12 +3631,12 @@ pub use i64x2_mul as u64x2_mul;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
+ let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
a.as_i32x4(),
a.as_i32x4(),
[0, 1]
));
- let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
+ let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
b.as_i32x4(),
b.as_i32x4(),
[0, 1]
@@ -3690,12 +3656,12 @@ pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
+ let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
a.as_i32x4(),
a.as_i32x4(),
[2, 3]
));
- let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
+ let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
b.as_i32x4(),
b.as_i32x4(),
[2, 3]
@@ -3715,12 +3681,12 @@ pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
+ let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
a.as_u32x4(),
a.as_u32x4(),
[0, 1]
));
- let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
+ let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
b.as_u32x4(),
b.as_u32x4(),
[0, 1]
@@ -3743,12 +3709,12 @@ pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
unsafe {
- let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
+ let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
a.as_u32x4(),
a.as_u32x4(),
[2, 3]
));
- let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
+ let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
b.as_u32x4(),
b.as_u32x4(),
[2, 3]
@@ -4175,7 +4141,7 @@ pub fn f32x4_convert_u32x4(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
let ret: simd::i32x4 = unsafe {
- simd_shuffle4!(
+ simd_shuffle!(
llvm_i32x2_trunc_sat_f64x2_s(a.as_f64x2()),
simd::i32x2::splat(0),
[0, 1, 2, 3],
@@ -4199,7 +4165,7 @@ pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
let ret: simd::i32x4 = unsafe {
- simd_shuffle4!(
+ simd_shuffle!(
llvm_i32x2_trunc_sat_f64x2_u(a.as_f64x2()),
simd::i32x2::splat(0),
[0, 1, 2, 3],
@@ -4216,7 +4182,7 @@ pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
+ simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
.v128()
}
}
@@ -4229,7 +4195,7 @@ pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
+ simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
.v128()
}
}
@@ -4246,7 +4212,7 @@ pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle4!(
+ simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle!(
a.as_f64x2(),
simd::f64x2::splat(0.0),
[0, 1, 2, 3]
@@ -4264,7 +4230,7 @@ pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
unsafe {
- simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle2!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
+ simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
.v128()
}
}
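The churn in this file is mechanical: the lane-count-specific `simd_shuffle{2,4,8,16}!` macros collapse into a single `simd_shuffle!` that infers the output width from the index array, and `static_assert!` now takes a plain boolean instead of the old `N: usize where ...` form. A hedged usage sketch of the public shuffle API these changes sit behind (assumes a wasm32 build with `simd128` enabled):

```rust
use core::arch::wasm32::{i32x4, i32x4_shuffle, v128};

// Indices 0..=3 select lanes of `a`, 4..=7 select lanes of `b`;
// `static_assert!(I < 8)` rejects anything larger at compile time.
fn interleave_low(a: v128, b: v128) -> v128 {
    i32x4_shuffle::<0, 4, 1, 5>(a, b)
}

fn demo() -> v128 {
    // Produces the lanes [1, 5, 2, 6].
    interleave_low(i32x4(1, 2, 3, 4), i32x4(5, 6, 7, 8))
}
```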
diff --git a/library/stdarch/crates/core_arch/src/x86/abm.rs b/library/stdarch/crates/core_arch/src/x86/abm.rs
index 50912f774..2688490e1 100644
--- a/library/stdarch/crates/core_arch/src/x86/abm.rs
+++ b/library/stdarch/crates/core_arch/src/x86/abm.rs
@@ -24,7 +24,7 @@ use stdarch_test::assert_instr;
///
/// When the operand is zero, it returns the operand's size in bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_lzcnt_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_lzcnt_u32)
#[inline]
#[target_feature(enable = "lzcnt")]
#[cfg_attr(test, assert_instr(lzcnt))]
@@ -35,7 +35,7 @@ pub unsafe fn _lzcnt_u32(x: u32) -> u32 {
/// Counts the bits that are set.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_popcnt32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_popcnt32)
#[inline]
#[target_feature(enable = "popcnt")]
#[cfg_attr(test, assert_instr(popcnt))]
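Only the Intel Intrinsics Guide links change in this file; the intrinsics themselves are untouched. A hedged usage sketch of the two intrinsics documented above (x86_64 with the relevant CPU features):

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{_lzcnt_u32, _popcnt32};

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "lzcnt,popcnt")]
unsafe fn bit_stats(x: u32) -> (u32, i32) {
    // `_lzcnt_u32(0)` returns 32, i.e. the operand's size in bits.
    (_lzcnt_u32(x), _popcnt32(x as i32))
}
```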
diff --git a/library/stdarch/crates/core_arch/src/x86/aes.rs b/library/stdarch/crates/core_arch/src/x86/aes.rs
index ffded1a0d..0346c8e05 100644
--- a/library/stdarch/crates/core_arch/src/x86/aes.rs
+++ b/library/stdarch/crates/core_arch/src/x86/aes.rs
@@ -30,7 +30,7 @@ extern "C" {
/// Performs one round of an AES decryption flow on data (state) in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesdec_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128)
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(test, assert_instr(aesdec))]
@@ -41,7 +41,7 @@ pub unsafe fn _mm_aesdec_si128(a: __m128i, round_key: __m128i) -> __m128i {
/// Performs the last round of an AES decryption flow on data (state) in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesdeclast_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128)
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(test, assert_instr(aesdeclast))]
@@ -52,7 +52,7 @@ pub unsafe fn _mm_aesdeclast_si128(a: __m128i, round_key: __m128i) -> __m128i {
/// Performs one round of an AES encryption flow on data (state) in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesenc_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenc_si128)
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(test, assert_instr(aesenc))]
@@ -63,7 +63,7 @@ pub unsafe fn _mm_aesenc_si128(a: __m128i, round_key: __m128i) -> __m128i {
/// Performs the last round of an AES encryption flow on data (state) in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesenclast_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128)
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(test, assert_instr(aesenclast))]
@@ -74,7 +74,7 @@ pub unsafe fn _mm_aesenclast_si128(a: __m128i, round_key: __m128i) -> __m128i {
/// Performs the `InvMixColumns` transformation on `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesimc_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128)
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(test, assert_instr(aesimc))]
@@ -89,14 +89,14 @@ pub unsafe fn _mm_aesimc_si128(a: __m128i) -> __m128i {
/// generating a round key for the encryption cipher using data from `a` and an
/// 8-bit round constant `IMM8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aeskeygenassist_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128)
#[inline]
#[target_feature(enable = "aes")]
#[cfg_attr(test, assert_instr(aeskeygenassist, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_aeskeygenassist_si128<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
aeskeygenassist(a, IMM8 as u8)
}
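`static_assert_imm8!(IMM8)` becomes the parameterized `static_assert_uimm_bits!(IMM8, 8)`: the immediate must fit in 8 unsigned bits. A rough stand-in for what that check enforces, written with an inline `const` block (Rust 1.79+); this is an illustration, not the actual stdarch macro:

```rust
// Fails compilation (at monomorphization) unless IMM8 fits in 8 unsigned bits.
fn requires_uimm8<const IMM8: i32>() {
    const { assert!(IMM8 >= 0 && IMM8 < (1 << 8)) };
}

fn demo() {
    requires_uimm8::<0xFF>(); // ok: 255 fits in 8 bits
    // requires_uimm8::<0x100>(); // would be a compile-time error
}
```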
diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs
index f8e83a35b..a77005c0e 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx.rs
@@ -26,7 +26,7 @@ use stdarch_test::assert_instr;
/// Adds packed double-precision (64-bit) floating-point elements
/// in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -38,7 +38,7 @@ pub unsafe fn _mm256_add_pd(a: __m256d, b: __m256d) -> __m256d {
/// Adds packed single-precision (32-bit) floating-point elements in `a` and
/// `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -50,7 +50,7 @@ pub unsafe fn _mm256_add_ps(a: __m256, b: __m256) -> __m256 {
/// Computes the bitwise AND of packed double-precision (64-bit)
/// floating-point elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_and_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_and_pd)
#[inline]
#[target_feature(enable = "avx")]
// FIXME: Should be 'vandpd' instruction.
@@ -66,7 +66,7 @@ pub unsafe fn _mm256_and_pd(a: __m256d, b: __m256d) -> __m256d {
/// Computes the bitwise AND of packed single-precision (32-bit) floating-point
/// elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_and_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_and_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vandps))]
@@ -80,7 +80,7 @@ pub unsafe fn _mm256_and_ps(a: __m256, b: __m256) -> __m256 {
/// Computes the bitwise OR of packed double-precision (64-bit) floating-point
/// elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_pd)
#[inline]
#[target_feature(enable = "avx")]
// FIXME: should be `vorpd` instruction.
@@ -96,7 +96,7 @@ pub unsafe fn _mm256_or_pd(a: __m256d, b: __m256d) -> __m256d {
/// Computes the bitwise OR of packed single-precision (32-bit) floating-point
/// elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vorps))]
@@ -110,18 +110,18 @@ pub unsafe fn _mm256_or_ps(a: __m256, b: __m256) -> __m256 {
/// Shuffles double-precision (64-bit) floating-point elements within 128-bit
/// lanes using the control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_shuffle_pd<const MASK: i32>(a: __m256d, b: __m256d) -> __m256d {
- static_assert_imm8!(MASK);
- simd_shuffle4!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
MASK as u32 & 0b1,
((MASK as u32 >> 1) & 0b1) + 4,
((MASK as u32 >> 2) & 0b1) + 2,
@@ -133,18 +133,18 @@ pub unsafe fn _mm256_shuffle_pd<const MASK: i32>(a: __m256d, b: __m256d) -> __m2
/// Shuffles single-precision (32-bit) floating-point elements in `a` within
/// 128-bit lanes using the control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_shuffle_ps<const MASK: i32>(a: __m256, b: __m256) -> __m256 {
- static_assert_imm8!(MASK);
- simd_shuffle8!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11) + 8,
@@ -160,7 +160,7 @@ pub unsafe fn _mm256_shuffle_ps<const MASK: i32>(a: __m256, b: __m256) -> __m256
/// Computes the bitwise NOT of packed double-precision (64-bit) floating-point
/// elements in `a`, and then AND with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_andnot_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_andnot_pd)
#[inline]
#[target_feature(enable = "avx")]
// FIXME: should be `vandnpd` instruction.
@@ -176,7 +176,7 @@ pub unsafe fn _mm256_andnot_pd(a: __m256d, b: __m256d) -> __m256d {
/// elements in `a`
/// and then AND with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_andnot_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_andnot_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vandnps))]
@@ -190,7 +190,7 @@ pub unsafe fn _mm256_andnot_ps(a: __m256, b: __m256) -> __m256 {
/// Compares packed double-precision (64-bit) floating-point elements
/// in `a` and `b`, and returns packed maximum values
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -202,7 +202,7 @@ pub unsafe fn _mm256_max_pd(a: __m256d, b: __m256d) -> __m256d {
/// Compares packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and returns packed maximum values
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -214,7 +214,7 @@ pub unsafe fn _mm256_max_ps(a: __m256, b: __m256) -> __m256 {
/// Compares packed double-precision (64-bit) floating-point elements
/// in `a` and `b`, and returns packed minimum values
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -226,7 +226,7 @@ pub unsafe fn _mm256_min_pd(a: __m256d, b: __m256d) -> __m256d {
/// Compares packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and returns packed minimum values
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -238,7 +238,7 @@ pub unsafe fn _mm256_min_ps(a: __m256, b: __m256) -> __m256 {
/// Multiplies packed double-precision (64-bit) floating-point elements
/// in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mul_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -250,7 +250,7 @@ pub unsafe fn _mm256_mul_pd(a: __m256d, b: __m256d) -> __m256d {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a` and
/// `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mul_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -262,7 +262,7 @@ pub unsafe fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256 {
/// Alternately adds and subtracts packed double-precision (64-bit)
/// floating-point elements in `a` to/from packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_addsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_addsub_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddsubpd))]
@@ -274,7 +274,7 @@ pub unsafe fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d {
/// Alternately adds and subtracts packed single-precision (32-bit)
/// floating-point elements in `a` to/from packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_addsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_addsub_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddsubps))]
@@ -286,7 +286,7 @@ pub unsafe fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256 {
/// Subtracts packed double-precision (64-bit) floating-point elements in `b`
/// from packed elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sub_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -298,7 +298,7 @@ pub unsafe fn _mm256_sub_pd(a: __m256d, b: __m256d) -> __m256d {
/// Subtracts packed single-precision (32-bit) floating-point elements in `b`
/// from packed elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sub_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -310,7 +310,7 @@ pub unsafe fn _mm256_sub_ps(a: __m256, b: __m256) -> __m256 {
/// Computes the division of each of the 8 packed 32-bit floating-point elements
/// in `a` by the corresponding packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_div_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_div_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vdivps))]
@@ -322,7 +322,7 @@ pub unsafe fn _mm256_div_ps(a: __m256, b: __m256) -> __m256 {
/// Computes the division of each of the 4 packed 64-bit floating-point elements
/// in `a` by the corresponding packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_div_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_div_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -343,21 +343,21 @@ pub unsafe fn _mm256_div_pd(a: __m256d, b: __m256d) -> __m256d {
///
/// [llvm_docs]: https://github.com/llvm-mirror/clang/blob/dcd8d797b20291f1a6b3e0ddda085aa2bbb382a8/lib/Headers/avxintrin.h#L382
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_round_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_round_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundpd, ROUNDING = 0x3))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_round_pd<const ROUNDING: i32>(a: __m256d) -> __m256d {
- static_assert_imm4!(ROUNDING);
+ static_assert_uimm_bits!(ROUNDING, 4);
roundpd256(a, ROUNDING)
}
/// Rounds packed double-precision (64-bit) floating point elements in `a`
/// toward positive infinity.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ceil_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ceil_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundpd))]
@@ -369,7 +369,7 @@ pub unsafe fn _mm256_ceil_pd(a: __m256d) -> __m256d {
/// Rounds packed double-precision (64-bit) floating point elements in `a`
/// toward negative infinity.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_floor_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_floor_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundpd))]
@@ -390,21 +390,21 @@ pub unsafe fn _mm256_floor_pd(a: __m256d) -> __m256d {
///
/// [llvm_docs]: https://github.com/llvm-mirror/clang/blob/dcd8d797b20291f1a6b3e0ddda085aa2bbb382a8/lib/Headers/avxintrin.h#L382
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_round_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_round_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundps, ROUNDING = 0x00))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_round_ps<const ROUNDING: i32>(a: __m256) -> __m256 {
- static_assert_imm4!(ROUNDING);
+ static_assert_uimm_bits!(ROUNDING, 4);
roundps256(a, ROUNDING)
}
/// Rounds packed single-precision (32-bit) floating point elements in `a`
/// toward positive infinity.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ceil_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ceil_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundps))]
@@ -416,7 +416,7 @@ pub unsafe fn _mm256_ceil_ps(a: __m256) -> __m256 {
/// Rounds packed single-precision (32-bit) floating point elements in `a`
/// toward negative infinity.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_floor_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_floor_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundps))]
@@ -428,7 +428,7 @@ pub unsafe fn _mm256_floor_ps(a: __m256) -> __m256 {
/// Returns the square root of packed single-precision (32-bit) floating point
/// elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sqrt_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -440,7 +440,7 @@ pub unsafe fn _mm256_sqrt_ps(a: __m256) -> __m256 {
/// Returns the square root of packed double-precision (64-bit) floating point
/// elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sqrt_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -452,7 +452,7 @@ pub unsafe fn _mm256_sqrt_pd(a: __m256d) -> __m256d {
/// Blends packed double-precision (64-bit) floating-point elements from
/// `a` and `b` using control mask `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blend_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blend_pd)
#[inline]
#[target_feature(enable = "avx")]
// Note: LLVM7 prefers single-precision blend instructions when
@@ -462,11 +462,11 @@ pub unsafe fn _mm256_sqrt_pd(a: __m256d) -> __m256d {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blend_pd<const IMM4: i32>(a: __m256d, b: __m256d) -> __m256d {
- static_assert_imm4!(IMM4);
- simd_shuffle4!(
+ static_assert_uimm_bits!(IMM4, 4);
+ simd_shuffle!(
a,
b,
- <const IMM4: i32> [
+ [
((IMM4 as u32 >> 0) & 1) * 4 + 0,
((IMM4 as u32 >> 1) & 1) * 4 + 1,
((IMM4 as u32 >> 2) & 1) * 4 + 2,
@@ -478,18 +478,18 @@ pub unsafe fn _mm256_blend_pd<const IMM4: i32>(a: __m256d, b: __m256d) -> __m256
/// Blends packed single-precision (32-bit) floating-point elements from
/// `a` and `b` using control mask `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blend_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blend_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vblendps, IMM8 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blend_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
- static_assert_imm8!(IMM8);
- simd_shuffle8!(
+ static_assert_uimm_bits!(IMM8, 8);
+ simd_shuffle!(
a,
b,
- <const IMM8: i32> [
+ [
((IMM8 as u32 >> 0) & 1) * 8 + 0,
((IMM8 as u32 >> 1) & 1) * 8 + 1,
((IMM8 as u32 >> 2) & 1) * 8 + 2,
@@ -505,7 +505,7 @@ pub unsafe fn _mm256_blend_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
/// Blends packed double-precision (64-bit) floating-point elements from
/// `a` and `b` using `c` as a mask.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blendv_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blendv_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vblendvpd))]
@@ -517,7 +517,7 @@ pub unsafe fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
/// Blends packed single-precision (32-bit) floating-point elements from
/// `a` and `b` using `c` as a mask.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blendv_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blendv_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vblendvps))]
@@ -531,14 +531,14 @@ pub unsafe fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// sum the four products, and conditionally return the sum
/// using the low 4 bits of `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dp_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dp_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vdpps, IMM8 = 0x0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_dp_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
vdpps(a, b, IMM8)
}
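The same `static_assert_uimm_bits!` plus `simd_shuffle!` rewrite applies to the blend intrinsics above, whose four mask bits pick each output lane from `b` (bit set) or `a` (bit clear). A hedged caller-side sketch of `_mm256_blend_pd` (assumes an AVX-capable x86_64 target):

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m256d, _mm256_blend_pd, _mm256_set1_pd};

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
unsafe fn blend_demo() -> __m256d {
    let a = _mm256_set1_pd(1.0);
    let b = _mm256_set1_pd(2.0);
    // IMM4 = 0b0101: lanes 0 and 2 come from `b`, lanes 1 and 3 from `a`.
    _mm256_blend_pd::<0b0101>(a, b)
}
```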
@@ -547,7 +547,7 @@ pub unsafe fn _mm256_dp_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
/// In the result, sums of elements from `a` are returned in even locations,
/// while sums of elements from `b` are returned in odd locations.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hadd_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhaddpd))]
@@ -562,7 +562,7 @@ pub unsafe fn _mm256_hadd_pd(a: __m256d, b: __m256d) -> __m256d {
/// indices 0, 1, 4, 5; while sums of elements from `b` are in locations
/// 2, 3, 6, 7.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hadd_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhaddps))]
@@ -576,7 +576,7 @@ pub unsafe fn _mm256_hadd_ps(a: __m256, b: __m256) -> __m256 {
/// In the result, differences of elements from `a` are returned in even locations,
/// while differences of elements from `b` are returned in odd locations.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hsub_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhsubpd))]
@@ -591,7 +591,7 @@ pub unsafe fn _mm256_hsub_pd(a: __m256d, b: __m256d) -> __m256d {
/// indices 0, 1, 4, 5; while sums of elements from `b` are in locations
/// 2, 3, 6, 7.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hsub_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhsubps))]
@@ -603,7 +603,7 @@ pub unsafe fn _mm256_hsub_ps(a: __m256, b: __m256) -> __m256 {
/// Computes the bitwise XOR of packed double-precision (64-bit) floating-point
/// elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_pd)
#[inline]
#[target_feature(enable = "avx")]
// FIXME Should be 'vxorpd' instruction.
@@ -618,7 +618,7 @@ pub unsafe fn _mm256_xor_pd(a: __m256d, b: __m256d) -> __m256d {
/// Computes the bitwise XOR of packed single-precision (32-bit) floating-point
/// elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -730,14 +730,14 @@ pub const _CMP_TRUE_US: i32 = 0x1f;
/// elements in `a` and `b` based on the comparison operand
/// specified by `IMM5`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_pd)
#[inline]
#[target_feature(enable = "avx,sse2")]
#[cfg_attr(test, assert_instr(vcmpeqpd, IMM5 = 0))] // TODO Validate vcmppd
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_pd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
vcmppd(a, b, IMM5 as i8)
}
@@ -745,14 +745,14 @@ pub unsafe fn _mm_cmp_pd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d {
/// elements in `a` and `b` based on the comparison operand
/// specified by `IMM5`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcmpeqpd, IMM5 = 0))] // TODO Validate vcmppd
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cmp_pd<const IMM5: i32>(a: __m256d, b: __m256d) -> __m256d {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
vcmppd256(a, b, IMM5 as u8)
}
@@ -760,14 +760,14 @@ pub unsafe fn _mm256_cmp_pd<const IMM5: i32>(a: __m256d, b: __m256d) -> __m256d
/// elements in `a` and `b` based on the comparison operand
/// specified by `IMM5`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_ps)
#[inline]
#[target_feature(enable = "avx,sse")]
#[cfg_attr(test, assert_instr(vcmpeqps, IMM5 = 0))] // TODO Validate vcmpps
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_ps<const IMM5: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
vcmpps(a, b, IMM5 as i8)
}
@@ -775,14 +775,14 @@ pub unsafe fn _mm_cmp_ps<const IMM5: i32>(a: __m128, b: __m128) -> __m128 {
/// elements in `a` and `b` based on the comparison operand
/// specified by `IMM5`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcmpeqps, IMM5 = 0))] // TODO Validate vcmpps
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cmp_ps<const IMM5: i32>(a: __m256, b: __m256) -> __m256 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
vcmpps256(a, b, IMM5 as u8)
}
@@ -792,14 +792,14 @@ pub unsafe fn _mm256_cmp_ps<const IMM5: i32>(a: __m256, b: __m256) -> __m256 {
/// and copies the upper element from `a` to the upper element of returned
/// vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_sd)
#[inline]
#[target_feature(enable = "avx,sse2")]
#[cfg_attr(test, assert_instr(vcmpeqsd, IMM5 = 0))] // TODO Validate vcmpsd
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_sd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
vcmpsd(a, b, IMM5 as i8)
}
@@ -809,21 +809,21 @@ pub unsafe fn _mm_cmp_sd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d {
/// and copies the upper 3 packed elements from `a` to the upper elements of
/// returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_ss)
#[inline]
#[target_feature(enable = "avx,sse")]
#[cfg_attr(test, assert_instr(vcmpeqss, IMM5 = 0))] // TODO Validate vcmpss
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_ss<const IMM5: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
vcmpss(a, b, IMM5 as i8)
}
/// Converts packed 32-bit integers in `a` to packed double-precision (64-bit)
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -835,7 +835,7 @@ pub unsafe fn _mm256_cvtepi32_pd(a: __m128i) -> __m256d {
/// Converts packed 32-bit integers in `a` to packed single-precision (32-bit)
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -847,7 +847,7 @@ pub unsafe fn _mm256_cvtepi32_ps(a: __m256i) -> __m256 {
/// Converts packed double-precision (64-bit) floating-point elements in `a`
/// to packed single-precision (32-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtpd_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -859,7 +859,7 @@ pub unsafe fn _mm256_cvtpd_ps(a: __m256d) -> __m128 {
/// Converts packed single-precision (32-bit) floating-point elements in `a`
/// to packed 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtps_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtps_epi32)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -871,7 +871,7 @@ pub unsafe fn _mm256_cvtps_epi32(a: __m256) -> __m256i {
/// Converts packed single-precision (32-bit) floating-point elements in `a`
/// to packed double-precision (64-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtps_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtps_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtps2pd))]
@@ -883,7 +883,7 @@ pub unsafe fn _mm256_cvtps_pd(a: __m128) -> __m256d {
/// Converts packed double-precision (64-bit) floating-point elements in `a`
/// to packed 32-bit integers with truncation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttpd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvttpd_epi32)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -895,7 +895,7 @@ pub unsafe fn _mm256_cvttpd_epi32(a: __m256d) -> __m128i {
/// Converts packed double-precision (64-bit) floating-point elements in `a`
/// to packed 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtpd_epi32)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -907,7 +907,7 @@ pub unsafe fn _mm256_cvtpd_epi32(a: __m256d) -> __m128i {
/// Converts packed single-precision (32-bit) floating-point elements in `a`
/// to packed 32-bit integers with truncation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttps_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvttps_epi32)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -919,7 +919,7 @@ pub unsafe fn _mm256_cvttps_epi32(a: __m256) -> __m256i {
/// Extracts 128 bits (composed of 4 packed single-precision (32-bit)
/// floating-point elements) from `a`, selected with `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf128_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extractf128_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(
@@ -929,18 +929,18 @@ pub unsafe fn _mm256_cvttps_epi32(a: __m256) -> __m256i {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extractf128_ps<const IMM1: i32>(a: __m256) -> __m128 {
- static_assert_imm1!(IMM1);
- simd_shuffle4!(
+ static_assert_uimm_bits!(IMM1, 1);
+ simd_shuffle!(
a,
_mm256_undefined_ps(),
- <const IMM1: i32> [[0, 1, 2, 3], [4, 5, 6, 7]][IMM1 as usize],
+ [[0, 1, 2, 3], [4, 5, 6, 7]][IMM1 as usize],
)
}
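// A short sketch (not part of this diff) of half selection via the const
// generic: `IMM1 == 0` yields the lower 128 bits, `IMM1 == 1` the upper.
// `extractf128_demo` is a hypothetical helper assuming runtime AVX detection.
#[cfg(target_arch = "x86_64")]
fn extractf128_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_setr_ps(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
            let hi = _mm256_extractf128_ps::<1>(a); // upper 128-bit half
            let mut out = [0.0f32; 4];
            _mm_storeu_ps(out.as_mut_ptr(), hi);
            assert_eq!(out, [4.0, 5.0, 6.0, 7.0]);
        }
    }
}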
/// Extracts 128 bits (composed of 2 packed double-precision (64-bit)
/// floating-point elements) from `a`, selected with `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf128_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extractf128_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(
@@ -950,13 +950,13 @@ pub unsafe fn _mm256_extractf128_ps<const IMM1: i32>(a: __m256) -> __m128 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extractf128_pd<const IMM1: i32>(a: __m256d) -> __m128d {
- static_assert_imm1!(IMM1);
- simd_shuffle2!(a, _mm256_undefined_pd(), <const IMM1: i32> [[0, 1], [2, 3]][IMM1 as usize])
+ static_assert_uimm_bits!(IMM1, 1);
+ simd_shuffle!(a, _mm256_undefined_pd(), [[0, 1], [2, 3]][IMM1 as usize])
}
/// Extracts 128 bits (composed of integer data) from `a`, selected with `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extractf128_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(
@@ -966,18 +966,18 @@ pub unsafe fn _mm256_extractf128_pd<const IMM1: i32>(a: __m256d) -> __m128d {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extractf128_si256<const IMM1: i32>(a: __m256i) -> __m128i {
- static_assert_imm1!(IMM1);
- let dst: i64x2 = simd_shuffle2!(
+ static_assert_uimm_bits!(IMM1, 1);
+ let dst: i64x2 = simd_shuffle!(
a.as_i64x4(),
_mm256_undefined_si256().as_i64x4(),
- <const IMM1: i32> [[0, 1], [2, 3]][IMM1 as usize],
+ [[0, 1], [2, 3]][IMM1 as usize],
);
transmute(dst)
}
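// The integer variant behaves the same way, lane for lane; a hypothetical
// sketch assuming runtime AVX detection (not part of this diff):
#[cfg(target_arch = "x86_64")]
fn extractf128_si256_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_setr_epi64x(10, 11, 12, 13);
            let lo = _mm256_extractf128_si256::<0>(a); // lower 128-bit half
            let mut out = [0i64; 2];
            _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, lo);
            assert_eq!(out, [10, 11]);
        }
    }
}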
/// Zeroes the contents of all XMM or YMM registers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zeroall)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_zeroall)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vzeroall))]
@@ -989,7 +989,7 @@ pub unsafe fn _mm256_zeroall() {
/// Zeroes the upper 128 bits of all YMM registers;
/// the lower 128 bits of the registers are unmodified.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zeroupper)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_zeroupper)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vzeroupper))]
@@ -1001,7 +1001,7 @@ pub unsafe fn _mm256_zeroupper() {
/// Shuffles single-precision (32-bit) floating-point elements in `a`
/// within 128-bit lanes using the control in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutevar_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutevar_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -1013,7 +1013,7 @@ pub unsafe fn _mm256_permutevar_ps(a: __m256, b: __m256i) -> __m256 {
/// Shuffles single-precision (32-bit) floating-point elements in `a`
/// using the control in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutevar_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutevar_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -1025,18 +1025,18 @@ pub unsafe fn _mm_permutevar_ps(a: __m128, b: __m128i) -> __m128 {
/// Shuffles single-precision (32-bit) floating-point elements in `a`
/// within 128-bit lanes using the control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpermilps, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute_ps<const IMM8: i32>(a: __m256) -> __m256 {
- static_assert_imm8!(IMM8);
- simd_shuffle8!(
+ static_assert_uimm_bits!(IMM8, 8);
+ simd_shuffle!(
a,
_mm256_undefined_ps(),
- <const IMM8: i32> [
+ [
(IMM8 as u32 >> 0) & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -1052,18 +1052,18 @@ pub unsafe fn _mm256_permute_ps<const IMM8: i32>(a: __m256) -> __m256 {
/// Shuffles single-precision (32-bit) floating-point elements in `a`
/// using the control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permute_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permute_ps)
#[inline]
#[target_feature(enable = "avx,sse")]
#[cfg_attr(test, assert_instr(vpermilps, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_permute_ps<const IMM8: i32>(a: __m128) -> __m128 {
- static_assert_imm8!(IMM8);
- simd_shuffle4!(
+ static_assert_uimm_bits!(IMM8, 8);
+ simd_shuffle!(
a,
_mm_undefined_ps(),
- <const IMM8: i32> [
+ [
(IMM8 as u32 >> 0) & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -1075,7 +1075,7 @@ pub unsafe fn _mm_permute_ps<const IMM8: i32>(a: __m128) -> __m128 {
/// Shuffles double-precision (64-bit) floating-point elements in `a`
/// within 256-bit lanes using the control in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutevar_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutevar_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -1087,7 +1087,7 @@ pub unsafe fn _mm256_permutevar_pd(a: __m256d, b: __m256i) -> __m256d {
/// Shuffles double-precision (64-bit) floating-point elements in `a`
/// using the control in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutevar_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutevar_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -1099,18 +1099,18 @@ pub unsafe fn _mm_permutevar_pd(a: __m128d, b: __m128i) -> __m128d {
/// Shuffles double-precision (64-bit) floating-point elements in `a`
/// within 128-bit lanes using the control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpermilpd, IMM4 = 0x1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute_pd<const IMM4: i32>(a: __m256d) -> __m256d {
- static_assert_imm4!(IMM4);
- simd_shuffle4!(
+ static_assert_uimm_bits!(IMM4, 4);
+ simd_shuffle!(
a,
_mm256_undefined_pd(),
- <const IMM4: i32> [
+ [
((IMM4 as u32 >> 0) & 1),
((IMM4 as u32 >> 1) & 1),
((IMM4 as u32 >> 2) & 1) + 2,
@@ -1122,67 +1122,67 @@ pub unsafe fn _mm256_permute_pd<const IMM4: i32>(a: __m256d) -> __m256d {
/// Shuffles double-precision (64-bit) floating-point elements in `a`
/// using the control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permute_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permute_pd)
#[inline]
#[target_feature(enable = "avx,sse2")]
#[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0x1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_permute_pd<const IMM2: i32>(a: __m128d) -> __m128d {
- static_assert_imm2!(IMM2);
- simd_shuffle2!(
+ static_assert_uimm_bits!(IMM2, 2);
+ simd_shuffle!(
a,
_mm_undefined_pd(),
- <const IMM2: i32> [(IMM2 as u32) & 1, (IMM2 as u32 >> 1) & 1],
+ [(IMM2 as u32) & 1, (IMM2 as u32 >> 1) & 1],
)
}
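// A sketch (not part of this diff): each control bit picks the source element
// for one destination lane, so `0b01` swaps the two doubles. Assumes runtime
// AVX detection; `permute_pd_demo` is a hypothetical helper.
#[cfg(target_arch = "x86_64")]
fn permute_pd_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm_setr_pd(1.0, 2.0);
            let r = _mm_permute_pd::<0b01>(a); // bit 0 -> elem 1, bit 1 -> elem 0
            let mut out = [0.0f64; 2];
            _mm_storeu_pd(out.as_mut_ptr(), r);
            assert_eq!(out, [2.0, 1.0]);
        }
    }
}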
/// Shuffles 256 bits (composed of 8 packed single-precision (32-bit)
/// floating-point elements) selected by `imm8` from `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2f128_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute2f128_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x5))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute2f128_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
vperm2f128ps256(a, b, IMM8 as i8)
}
/// Shuffles 256 bits (composed of 4 packed double-precision (64-bit)
/// floating-point elements) selected by `imm8` from `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2f128_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute2f128_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x31))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute2f128_pd<const IMM8: i32>(a: __m256d, b: __m256d) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
vperm2f128pd256(a, b, IMM8 as i8)
}
/// Shuffles 128 bits (composed of integer data) selected by `imm8`
/// from `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2f128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute2f128_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x31))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute2f128_si256<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vperm2f128si256(a.as_i32x8(), b.as_i32x8(), IMM8 as i8))
}
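// A sketch (not part of this diff): the low and high nibbles of `IMM8` pick the
// source 128-bit half for the low and high destination halves (0/1 select the
// halves of the first operand, 2/3 of the second), so `0x01` swaps the halves
// of `a`. Assumes runtime AVX detection.
#[cfg(target_arch = "x86_64")]
fn permute2f128_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_setr_ps(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
            let r = _mm256_permute2f128_ps::<0x01>(a, a);
            let mut out = [0.0f32; 8];
            _mm256_storeu_ps(out.as_mut_ptr(), r);
            assert_eq!(out, [4.0, 5.0, 6.0, 7.0, 0.0, 1.0, 2.0, 3.0]);
        }
    }
}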
/// Broadcasts a single-precision (32-bit) floating-point element from memory
/// to all elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_ss)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -1195,7 +1195,7 @@ pub unsafe fn _mm256_broadcast_ss(f: &f32) -> __m256 {
/// Broadcasts a single-precision (32-bit) floating-point element from memory
/// to all elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcast_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcast_ss)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -1208,7 +1208,7 @@ pub unsafe fn _mm_broadcast_ss(f: &f32) -> __m128 {
/// Broadcasts a double-precision (64-bit) floating-point element from memory
/// to all elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_sd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
@@ -1221,7 +1221,7 @@ pub unsafe fn _mm256_broadcast_sd(f: &f64) -> __m256d {
/// Broadcasts 128 bits from memory (composed of 4 packed single-precision
/// (32-bit) floating-point elements) to all elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vbroadcastf128))]
@@ -1233,7 +1233,7 @@ pub unsafe fn _mm256_broadcast_ps(a: &__m128) -> __m256 {
/// Broadcasts 128 bits from memory (composed of 2 packed double-precision
/// (64-bit) floating-point elements) to all elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vbroadcastf128))]
@@ -1246,7 +1246,7 @@ pub unsafe fn _mm256_broadcast_pd(a: &__m128d) -> __m256d {
/// single-precision (32-bit) floating-point elements) from `b` into the result
/// at the location specified by `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf128_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insertf128_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(
@@ -1256,11 +1256,11 @@ pub unsafe fn _mm256_broadcast_pd(a: &__m128d) -> __m256d {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insertf128_ps<const IMM1: i32>(a: __m256, b: __m128) -> __m256 {
- static_assert_imm1!(IMM1);
- simd_shuffle8!(
+ static_assert_uimm_bits!(IMM1, 1);
+ simd_shuffle!(
a,
_mm256_castps128_ps256(b),
- <const IMM1: i32> [[8, 9, 10, 11, 4, 5, 6, 7], [0, 1, 2, 3, 8, 9, 10, 11]][IMM1 as usize],
+ [[8, 9, 10, 11, 4, 5, 6, 7], [0, 1, 2, 3, 8, 9, 10, 11]][IMM1 as usize],
)
}
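// A sketch (not part of this diff): `IMM1` selects which 128-bit half of the
// result receives `b`; the other half is copied from `a`. Assumes runtime AVX
// detection; `insertf128_demo` is a hypothetical helper.
#[cfg(target_arch = "x86_64")]
fn insertf128_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_set1_ps(0.0);
            let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
            let r = _mm256_insertf128_ps::<1>(a, b); // replace the upper half
            let mut out = [0.0f32; 8];
            _mm256_storeu_ps(out.as_mut_ptr(), r);
            assert_eq!(out, [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0]);
        }
    }
}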
@@ -1268,7 +1268,7 @@ pub unsafe fn _mm256_insertf128_ps<const IMM1: i32>(a: __m256, b: __m128) -> __m
/// double-precision (64-bit) floating-point elements) from `b` into the result
/// at the location specified by `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf128_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insertf128_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(
@@ -1278,18 +1278,18 @@ pub unsafe fn _mm256_insertf128_ps<const IMM1: i32>(a: __m256, b: __m128) -> __m
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insertf128_pd<const IMM1: i32>(a: __m256d, b: __m128d) -> __m256d {
- static_assert_imm1!(IMM1);
- simd_shuffle4!(
+ static_assert_uimm_bits!(IMM1, 1);
+ simd_shuffle!(
a,
_mm256_castpd128_pd256(b),
- <const IMM1: i32> [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize],
+ [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize],
)
}
/// Copies `a` to the result, then inserts 128 bits from `b` into the result
/// at the location specified by `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insertf128_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(
@@ -1299,11 +1299,11 @@ pub unsafe fn _mm256_insertf128_pd<const IMM1: i32>(a: __m256d, b: __m128d) -> _
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insertf128_si256<const IMM1: i32>(a: __m256i, b: __m128i) -> __m256i {
- static_assert_imm1!(IMM1);
- let dst: i64x4 = simd_shuffle4!(
+ static_assert_uimm_bits!(IMM1, 1);
+ let dst: i64x4 = simd_shuffle!(
a.as_i64x4(),
_mm256_castsi128_si256(b).as_i64x4(),
- <const IMM1: i32> [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize],
+ [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize],
);
transmute(dst)
}
@@ -1311,42 +1311,42 @@ pub unsafe fn _mm256_insertf128_si256<const IMM1: i32>(a: __m256i, b: __m128i) -
/// Copies `a` to the result, and inserts the 8-bit integer `i` into the result
/// at the location specified by `index`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insert_epi8)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insert_epi8<const INDEX: i32>(a: __m256i, i: i8) -> __m256i {
- static_assert_imm5!(INDEX);
+ static_assert_uimm_bits!(INDEX, 5);
transmute(simd_insert(a.as_i8x32(), INDEX as u32, i))
}
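// A sketch (not part of this diff): `INDEX` addresses one of the 32 byte lanes,
// hence the 5-bit assertion above. Assumes runtime AVX detection.
#[cfg(target_arch = "x86_64")]
fn insert_epi8_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_set1_epi8(0);
            let r = _mm256_insert_epi8::<31>(a, 0x7f); // set the last byte
            let mut out = [0i8; 32];
            _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
            assert_eq!(out[31], 0x7f);
        }
    }
}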
/// Copies `a` to the result, and inserts the 16-bit integer `i` into the result
/// at the location specified by `index`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insert_epi16)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insert_epi16<const INDEX: i32>(a: __m256i, i: i16) -> __m256i {
- static_assert_imm4!(INDEX);
+ static_assert_uimm_bits!(INDEX, 4);
transmute(simd_insert(a.as_i16x16(), INDEX as u32, i))
}
/// Copies `a` to the result, and inserts the 32-bit integer `i` into the result
/// at the location specified by `index`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insert_epi32)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insert_epi32<const INDEX: i32>(a: __m256i, i: i32) -> __m256i {
- static_assert_imm3!(INDEX);
+ static_assert_uimm_bits!(INDEX, 3);
transmute(simd_insert(a.as_i32x8(), INDEX as u32, i))
}
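// Likewise for 32-bit lanes, where only 3 index bits are meaningful; a
// hypothetical sketch assuming runtime AVX detection (not part of this diff):
#[cfg(target_arch = "x86_64")]
fn insert_epi32_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
            let r = _mm256_insert_epi32::<3>(a, -1);
            let mut out = [0i32; 8];
            _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
            assert_eq!(out, [0, 1, 2, -1, 4, 5, 6, 7]);
        }
    }
}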
@@ -1355,7 +1355,7 @@ pub unsafe fn _mm256_insert_epi32<const INDEX: i32>(a: __m256i, i: i32) -> __m25
/// `mem_addr` must be aligned on a 32-byte boundary or a
/// general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovapd expected
@@ -1370,7 +1370,7 @@ pub unsafe fn _mm256_load_pd(mem_addr: *const f64) -> __m256d {
/// `mem_addr` must be aligned on a 32-byte boundary or a
/// general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovapd expected
@@ -1385,7 +1385,7 @@ pub unsafe fn _mm256_store_pd(mem_addr: *mut f64, a: __m256d) {
/// `mem_addr` must be aligned on a 32-byte boundary or a
/// general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -1400,7 +1400,7 @@ pub unsafe fn _mm256_load_ps(mem_addr: *const f32) -> __m256 {
/// `mem_addr` must be aligned on a 32-byte boundary or a
/// general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -1414,7 +1414,7 @@ pub unsafe fn _mm256_store_ps(mem_addr: *mut f32, a: __m256) {
/// floating-point elements) from memory into the result.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovupd expected
@@ -1433,7 +1433,7 @@ pub unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d {
/// floating-point elements) from `a` into memory.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovupd expected
@@ -1446,7 +1446,7 @@ pub unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d) {
/// floating-point elements) from memory into the result.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovups))]
@@ -1465,7 +1465,7 @@ pub unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256 {
/// floating-point elements) from `a` into memory.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovups))]
@@ -1478,7 +1478,7 @@ pub unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256) {
/// `mem_addr` must be aligned on a 32-byte boundary or a
/// general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovdqa expected
@@ -1491,7 +1491,7 @@ pub unsafe fn _mm256_load_si256(mem_addr: *const __m256i) -> __m256i {
/// `mem_addr` must be aligned on a 32-byte boundary or a
/// general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovdqa expected
@@ -1503,7 +1503,7 @@ pub unsafe fn _mm256_store_si256(mem_addr: *mut __m256i, a: __m256i) {
/// Loads 256 bits of integer data from memory into the result.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected
@@ -1521,7 +1521,7 @@ pub unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i {
/// Stores 256 bits of integer data from `a` into memory.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected
@@ -1534,7 +1534,7 @@ pub unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i) {
/// into the result using `mask` (elements are zeroed out when the high bit of the
/// corresponding element is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskload_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskload_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovpd))]
@@ -1546,7 +1546,7 @@ pub unsafe fn _mm256_maskload_pd(mem_addr: *const f64, mask: __m256i) -> __m256d
/// Stores packed double-precision (64-bit) floating-point elements from `a`
/// into memory using `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskstore_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskstore_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovpd))]
@@ -1559,7 +1559,7 @@ pub unsafe fn _mm256_maskstore_pd(mem_addr: *mut f64, mask: __m256i, a: __m256d)
/// into the result using `mask` (elements are zeroed out when the high bit of the
/// corresponding element is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskload_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskload_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovpd))]
@@ -1571,7 +1571,7 @@ pub unsafe fn _mm_maskload_pd(mem_addr: *const f64, mask: __m128i) -> __m128d {
/// Stores packed double-precision (64-bit) floating-point elements from `a`
/// into memory using `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskstore_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskstore_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovpd))]
@@ -1584,7 +1584,7 @@ pub unsafe fn _mm_maskstore_pd(mem_addr: *mut f64, mask: __m128i, a: __m128d) {
/// into the result using `mask` (elements are zeroed out when the high bit of the
/// corresponding element is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskload_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskload_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovps))]
@@ -1596,7 +1596,7 @@ pub unsafe fn _mm256_maskload_ps(mem_addr: *const f32, mask: __m256i) -> __m256
/// Stores packed single-precision (32-bit) floating-point elements from `a`
/// into memory using `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskstore_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskstore_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovps))]
@@ -1609,7 +1609,7 @@ pub unsafe fn _mm256_maskstore_ps(mem_addr: *mut f32, mask: __m256i, a: __m256)
/// into the result using `mask` (elements are zeroed out when the high bit of the
/// corresponding element is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskload_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskload_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovps))]
@@ -1621,7 +1621,7 @@ pub unsafe fn _mm_maskload_ps(mem_addr: *const f32, mask: __m128i) -> __m128 {
/// Stores packed single-precision (32-bit) floating-point elements from `a`
/// into memory using `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskstore_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskstore_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovps))]
@@ -1633,44 +1633,44 @@ pub unsafe fn _mm_maskstore_ps(mem_addr: *mut f32, mask: __m128i, a: __m128) {
/// Duplicates odd-indexed single-precision (32-bit) floating-point elements
/// from `a`, and returns the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movehdup_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movehdup_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovshdup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movehdup_ps(a: __m256) -> __m256 {
- simd_shuffle8!(a, a, [1, 1, 3, 3, 5, 5, 7, 7])
+ simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7])
}
/// Duplicates even-indexed single-precision (32-bit) floating-point elements
/// from `a`, and returns the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_moveldup_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_moveldup_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovsldup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_moveldup_ps(a: __m256) -> __m256 {
- simd_shuffle8!(a, a, [0, 0, 2, 2, 4, 4, 6, 6])
+ simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6])
}
/// Duplicates even-indexed double-precision (64-bit) floating-point elements
/// from `a`, and returns the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movedup_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movedup_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovddup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movedup_pd(a: __m256d) -> __m256d {
- simd_shuffle4!(a, a, [0, 0, 2, 2])
+ simd_shuffle!(a, a, [0, 0, 2, 2])
}
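// A sketch (not part of this diff) of the duplication pattern, using
// `_mm256_movehdup_ps` from above: each odd-indexed lane is copied into the
// even lane below it. Assumes runtime AVX detection.
#[cfg(target_arch = "x86_64")]
fn movehdup_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_setr_ps(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
            let r = _mm256_movehdup_ps(a);
            let mut out = [0.0f32; 8];
            _mm256_storeu_ps(out.as_mut_ptr(), r);
            assert_eq!(out, [1.0, 1.0, 3.0, 3.0, 5.0, 5.0, 7.0, 7.0]);
        }
    }
}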
/// Loads 256 bits of integer data from unaligned memory into the result.
/// This intrinsic may perform better than `_mm256_loadu_si256` when the
/// data crosses a cache line boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_lddqu_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lddqu_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vlddqu))]
@@ -1683,7 +1683,7 @@ pub unsafe fn _mm256_lddqu_si256(mem_addr: *const __m256i) -> __m256i {
/// aligned memory location. To minimize caching, the data is flagged as
/// non-temporal (unlikely to be used again soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_stream_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_stream_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovntps))] // FIXME vmovntdq
@@ -1696,7 +1696,7 @@ pub unsafe fn _mm256_stream_si256(mem_addr: *mut __m256i, a: __m256i) {
/// to a 32-byte aligned memory location. To minimize caching, the data is
/// flagged as non-temporal (unlikely to be used again soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_stream_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_stream_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovntps))] // FIXME vmovntpd
@@ -1711,7 +1711,7 @@ pub unsafe fn _mm256_stream_pd(mem_addr: *mut f64, a: __m256d) {
/// caching, the data is flagged as non-temporal (unlikely to be used again
/// soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_stream_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_stream_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovntps))]
@@ -1725,7 +1725,7 @@ pub unsafe fn _mm256_stream_ps(mem_addr: *mut f32, a: __m256) {
/// floating-point elements in `a`, and returns the results. The maximum
/// relative error for this approximation is less than 1.5*2^-12.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rcp_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vrcpps))]
@@ -1738,7 +1738,7 @@ pub unsafe fn _mm256_rcp_ps(a: __m256) -> __m256 {
/// (32-bit) floating-point elements in `a`, and returns the results.
/// The maximum relative error for this approximation is less than 1.5*2^-12.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rsqrt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rsqrt_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vrsqrtps))]
@@ -1750,49 +1750,49 @@ pub unsafe fn _mm256_rsqrt_ps(a: __m256) -> __m256 {
/// Unpacks and interleaves double-precision (64-bit) floating-point elements
/// from the high half of each 128-bit lane in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpackhi_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpackhi_pd(a: __m256d, b: __m256d) -> __m256d {
- simd_shuffle4!(a, b, [1, 5, 3, 7])
+ simd_shuffle!(a, b, [1, 5, 3, 7])
}
/// Unpacks and interleaves single-precision (32-bit) floating-point elements
/// from the high half of each 128-bit lane in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpackhi_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vunpckhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpackhi_ps(a: __m256, b: __m256) -> __m256 {
- simd_shuffle8!(a, b, [2, 10, 3, 11, 6, 14, 7, 15])
+ simd_shuffle!(a, b, [2, 10, 3, 11, 6, 14, 7, 15])
}
/// Unpacks and interleaves double-precision (64-bit) floating-point elements
/// from the low half of each 128-bit lane in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpacklo_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpacklo_pd(a: __m256d, b: __m256d) -> __m256d {
- simd_shuffle4!(a, b, [0, 4, 2, 6])
+ simd_shuffle!(a, b, [0, 4, 2, 6])
}
/// Unpacks and interleaves single-precision (32-bit) floating-point elements
/// from the low half of each 128-bit lane in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpacklo_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vunpcklps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256 {
- simd_shuffle8!(a, b, [0, 8, 1, 9, 4, 12, 5, 13])
+ simd_shuffle!(a, b, [0, 8, 1, 9, 4, 12, 5, 13])
}
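// A sketch (not part of this diff): note the interleave happens per 128-bit
// lane, not across the whole vector. Assumes runtime AVX detection.
#[cfg(target_arch = "x86_64")]
fn unpacklo_demo() {
    if is_x86_feature_detected!("avx") {
        unsafe {
            use std::arch::x86_64::*;
            let a = _mm256_setr_ps(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
            let b = _mm256_setr_ps(8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
            let r = _mm256_unpacklo_ps(a, b);
            let mut out = [0.0f32; 8];
            _mm256_storeu_ps(out.as_mut_ptr(), r);
            // Lanes 0..1 of each 128-bit half of `a` and `b` are interleaved.
            assert_eq!(out, [0.0, 8.0, 1.0, 9.0, 4.0, 12.0, 5.0, 13.0]);
        }
    }
}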
/// Computes the bitwise AND of 256 bits (representing integer data) in `a` and
@@ -1800,7 +1800,7 @@ pub unsafe fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256 {
/// Computes the bitwise NOT of `a` and then AND with `b`, and sets `CF` to 1 if
/// the result is zero, otherwise sets `CF` to 0. Returns the `ZF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testz_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testz_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vptest))]
@@ -1814,7 +1814,7 @@ pub unsafe fn _mm256_testz_si256(a: __m256i, b: __m256i) -> i32 {
/// Computes the bitwise NOT of `a` and then AND with `b`, and sets `CF` to 1 if
/// the result is zero, otherwise sets `CF` to 0. Returns the `CF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testc_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testc_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vptest))]
@@ -1829,7 +1829,7 @@ pub unsafe fn _mm256_testc_si256(a: __m256i, b: __m256i) -> i32 {
/// the result is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and
/// `CF` values are zero, otherwise returns 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testnzc_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testnzc_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vptest))]
@@ -1846,7 +1846,7 @@ pub unsafe fn _mm256_testnzc_si256(a: __m256i, b: __m256i) -> i32 {
/// `CF` to 1 if the sign bit of each 64-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `ZF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testz_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testz_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestpd))]
@@ -1863,7 +1863,7 @@ pub unsafe fn _mm256_testz_pd(a: __m256d, b: __m256d) -> i32 {
/// `CF` to 1 if the sign bit of each 64-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `CF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testc_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testc_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestpd))]
@@ -1881,7 +1881,7 @@ pub unsafe fn _mm256_testc_pd(a: __m256d, b: __m256d) -> i32 {
/// is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` values
/// are zero, otherwise returns 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testnzc_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testnzc_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestpd))]
@@ -1898,7 +1898,7 @@ pub unsafe fn _mm256_testnzc_pd(a: __m256d, b: __m256d) -> i32 {
/// `CF` to 1 if the sign bit of each 64-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `ZF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestpd))]
@@ -1915,7 +1915,7 @@ pub unsafe fn _mm_testz_pd(a: __m128d, b: __m128d) -> i32 {
/// `CF` to 1 if the sign bit of each 64-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `CF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestpd))]
@@ -1933,7 +1933,7 @@ pub unsafe fn _mm_testc_pd(a: __m128d, b: __m128d) -> i32 {
/// is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` values
/// are zero, otherwise returns 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testnzc_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestpd))]
@@ -1950,7 +1950,7 @@ pub unsafe fn _mm_testnzc_pd(a: __m128d, b: __m128d) -> i32 {
/// `CF` to 1 if the sign bit of each 32-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `ZF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testz_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testz_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestps))]
@@ -1967,7 +1967,7 @@ pub unsafe fn _mm256_testz_ps(a: __m256, b: __m256) -> i32 {
/// `CF` to 1 if the sign bit of each 32-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `CF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testc_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testc_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestps))]
@@ -1985,7 +1985,7 @@ pub unsafe fn _mm256_testc_ps(a: __m256, b: __m256) -> i32 {
/// is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` values
/// are zero, otherwise returns 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testnzc_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testnzc_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestps))]
@@ -2002,7 +2002,7 @@ pub unsafe fn _mm256_testnzc_ps(a: __m256, b: __m256) -> i32 {
/// `CF` to 1 if the sign bit of each 32-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `ZF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestps))]
@@ -2019,7 +2019,7 @@ pub unsafe fn _mm_testz_ps(a: __m128, b: __m128) -> i32 {
/// `CF` to 1 if the sign bit of each 32-bit element in the intermediate value
/// is zero, otherwise sets `CF` to 0. Returns the `CF` value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestps))]
@@ -2037,7 +2037,7 @@ pub unsafe fn _mm_testc_ps(a: __m128, b: __m128) -> i32 {
/// is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` values
/// are zero, otherwise returns 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testnzc_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vtestps))]
@@ -2050,7 +2050,7 @@ pub unsafe fn _mm_testnzc_ps(a: __m128, b: __m128) -> i32 {
/// corresponding packed double-precision (64-bit) floating-point element in
/// `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movemask_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movemask_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovmskpd))]
@@ -2063,7 +2063,7 @@ pub unsafe fn _mm256_movemask_pd(a: __m256d) -> i32 {
/// corresponding packed single-precision (32-bit) floating-point element in
/// `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movemask_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movemask_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovmskps))]
@@ -2074,7 +2074,7 @@ pub unsafe fn _mm256_movemask_ps(a: __m256) -> i32 {
/// Returns a vector of type __m256d with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setzero_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxorps))] // FIXME vxorpd expected
@@ -2085,7 +2085,7 @@ pub unsafe fn _mm256_setzero_pd() -> __m256d {
/// Returns a vector of type __m256 with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setzero_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -2096,7 +2096,7 @@ pub unsafe fn _mm256_setzero_ps() -> __m256 {
/// Returns a vector of type __m256i with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setzero_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vxor))]
@@ -2108,7 +2108,7 @@ pub unsafe fn _mm256_setzero_si256() -> __m256i {
/// Sets packed double-precision (64-bit) floating-point elements in the returned
/// vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_pd)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2121,7 +2121,7 @@ pub unsafe fn _mm256_set_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d {
/// Sets packed single-precision (32-bit) floating-point elements in the returned
/// vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_ps)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2141,7 +2141,7 @@ pub unsafe fn _mm256_set_ps(
/// Sets packed 8-bit integers in the returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_epi8)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2191,7 +2191,7 @@ pub unsafe fn _mm256_set_epi8(
/// Sets packed 16-bit integers in the returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_epi16)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2225,7 +2225,7 @@ pub unsafe fn _mm256_set_epi16(
/// Sets packed 32-bit integers in the returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_epi32)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2245,7 +2245,7 @@ pub unsafe fn _mm256_set_epi32(
/// Sets packed 64-bit integers in the returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_epi64x)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2257,7 +2257,7 @@ pub unsafe fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i {
/// Sets packed double-precision (64-bit) floating-point elements in the returned
/// vector with the supplied values in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_pd)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2269,7 +2269,7 @@ pub unsafe fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d {
/// Sets packed single-precision (32-bit) floating-point elements in returned
/// vector with the supplied values in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_ps)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2290,7 +2290,7 @@ pub unsafe fn _mm256_setr_ps(
/// Sets packed 8-bit integers in returned vector with the supplied values in
/// reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_epi8)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2341,7 +2341,7 @@ pub unsafe fn _mm256_setr_epi8(
/// Sets packed 16-bit integers in returned vector with the supplied values in
/// reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_epi16)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2376,7 +2376,7 @@ pub unsafe fn _mm256_setr_epi16(
/// Sets packed 32-bit integers in returned vector with the supplied values in
/// reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_epi32)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2397,7 +2397,7 @@ pub unsafe fn _mm256_setr_epi32(
/// Sets packed 64-bit integers in returned vector with the supplied values in
/// reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_epi64x)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2409,7 +2409,7 @@ pub unsafe fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i {
/// Broadcasts double-precision (64-bit) floating-point value `a` to all
/// elements of returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_pd)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2421,7 +2421,7 @@ pub unsafe fn _mm256_set1_pd(a: f64) -> __m256d {
/// Broadcasts single-precision (32-bit) floating-point value `a` to all
/// elements of returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_ps)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2433,7 +2433,7 @@ pub unsafe fn _mm256_set1_ps(a: f32) -> __m256 {
/// Broadcasts 8-bit integer `a` to all elements of returned vector.
/// This intrinsic may generate the `vpbroadcastb` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_epi8)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -2453,7 +2453,7 @@ pub unsafe fn _mm256_set1_epi8(a: i8) -> __m256i {
/// Broadcasts 16-bit integer `a` to all elements of returned vector.
/// This intrinsic may generate the `vpbroadcastw` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_epi16)
#[inline]
#[target_feature(enable = "avx")]
//#[cfg_attr(test, assert_instr(vpshufb))]
@@ -2467,7 +2467,7 @@ pub unsafe fn _mm256_set1_epi16(a: i16) -> __m256i {
/// Broadcasts 32-bit integer `a` to all elements of returned vector.
/// This intrinsic may generate the `vpbroadcastd` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_epi32)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2479,7 +2479,7 @@ pub unsafe fn _mm256_set1_epi32(a: i32) -> __m256i {
/// Broadcasts 64-bit integer `a` to all elements of returned vector.
/// This intrinsic may generate the `vpbroadcastq` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set1_epi64x)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(vinsertf128))]
@@ -2492,7 +2492,7 @@ pub unsafe fn _mm256_set1_epi64x(a: i64) -> __m256i {
/// Casts vector of type __m256d to type __m256.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castpd_ps)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2504,7 +2504,7 @@ pub unsafe fn _mm256_castpd_ps(a: __m256d) -> __m256 {
/// Casts vector of type __m256 to type __m256d.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castps_pd)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2516,7 +2516,7 @@ pub unsafe fn _mm256_castps_pd(a: __m256) -> __m256d {
/// Casts vector of type __m256 to type __m256i.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castps_si256)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2528,7 +2528,7 @@ pub unsafe fn _mm256_castps_si256(a: __m256) -> __m256i {
/// Casts vector of type __m256i to type __m256.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castsi256_ps)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2540,7 +2540,7 @@ pub unsafe fn _mm256_castsi256_ps(a: __m256i) -> __m256 {
/// Casts vector of type __m256d to type __m256i.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castpd_si256)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2552,7 +2552,7 @@ pub unsafe fn _mm256_castpd_si256(a: __m256d) -> __m256i {
/// Casts vector of type __m256i to type __m256d.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castsi256_pd)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2564,31 +2564,31 @@ pub unsafe fn _mm256_castsi256_pd(a: __m256i) -> __m256d {
/// Casts vector of type __m256 to type __m128.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps256_ps128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castps256_ps128)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
// instructions, thus it has zero latency.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_castps256_ps128(a: __m256) -> __m128 {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
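// Reviewer sketch (not part of this patch): the cast intrinsics in this
// section only reinterpret bits, so taking the low 128 bits of a 256-bit
// vector is free at run time. A minimal, hedged usage example against the
// public std::arch API:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn low_half(v: __m256) -> __m128 {
    // Keeps lanes 0..=3; the upper half is simply dropped.
    _mm256_castps256_ps128(v)
}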
/// Casts vector of type __m256d to type __m128d.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd256_pd128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castpd256_pd128)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
// instructions, thus it has zero latency.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_castpd256_pd128(a: __m256d) -> __m128d {
- simd_shuffle2!(a, a, [0, 1])
+ simd_shuffle!(a, a, [0, 1])
}
/// Casts vector of type __m256i to type __m128i.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castsi256_si128)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2596,42 +2596,42 @@ pub unsafe fn _mm256_castpd256_pd128(a: __m256d) -> __m128d {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_castsi256_si128(a: __m256i) -> __m128i {
let a = a.as_i64x4();
- let dst: i64x2 = simd_shuffle2!(a, a, [0, 1]);
+ let dst: i64x2 = simd_shuffle!(a, a, [0, 1]);
transmute(dst)
}
/// Casts vector of type __m128 to type __m256;
/// the upper 128 bits of the result are undefined.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps128_ps256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castps128_ps256)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
// instructions, thus it has zero latency.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_castps128_ps256(a: __m128) -> __m256 {
- // FIXME simd_shuffle8!(a, a, [0, 1, 2, 3, -1, -1, -1, -1])
- simd_shuffle8!(a, a, [0, 1, 2, 3, 0, 0, 0, 0])
+ // FIXME simd_shuffle!(a, a, [0, 1, 2, 3, -1, -1, -1, -1])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 0, 0, 0, 0])
}
/// Casts vector of type __m128d to type __m256d;
/// the upper 128 bits of the result are undefined.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd128_pd256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castpd128_pd256)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
// instructions, thus it has zero latency.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_castpd128_pd256(a: __m128d) -> __m256d {
- // FIXME simd_shuffle4!(a, a, [0, 1, -1, -1])
- simd_shuffle4!(a, a, [0, 1, 0, 0])
+ // FIXME simd_shuffle!(a, a, [0, 1, -1, -1])
+ simd_shuffle!(a, a, [0, 1, 0, 0])
}
/// Casts vector of type __m128i to type __m256i;
/// the upper 128 bits of the result are undefined.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_castsi128_si256)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic is only used for compilation and does not generate any
@@ -2639,8 +2639,8 @@ pub unsafe fn _mm256_castpd128_pd256(a: __m128d) -> __m256d {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_castsi128_si256(a: __m128i) -> __m256i {
let a = a.as_i64x2();
- // FIXME simd_shuffle4!(a, a, [0, 1, -1, -1])
- let dst: i64x4 = simd_shuffle4!(a, a, [0, 1, 0, 0]);
+ // FIXME simd_shuffle!(a, a, [0, 1, -1, -1])
+ let dst: i64x4 = simd_shuffle!(a, a, [0, 1, 0, 0]);
transmute(dst)
}
@@ -2648,21 +2648,21 @@ pub unsafe fn _mm256_castsi128_si256(a: __m128i) -> __m256i {
/// 128-bit floating-point vector of `[4 x float]`. The lower 128 bits contain
/// the value of the source vector. The upper 128 bits are set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextps128_ps256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_zextps128_ps256)
#[inline]
#[target_feature(enable = "avx,sse")]
// This intrinsic is only used for compilation and does not generate any
// instructions, thus it has zero latency.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_zextps128_ps256(a: __m128) -> __m256 {
- simd_shuffle8!(a, _mm_setzero_ps(), [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, _mm_setzero_ps(), [0, 1, 2, 3, 4, 5, 6, 7])
}
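// Reviewer sketch (not part of this patch): the FIXMEs above and the zext
// intrinsics here are two sides of the same trade-off. Assuming the public
// std::arch API, `_mm256_castps128_ps256` leaves the upper 128 bits
// undefined, while `_mm256_zextps128_ps256` zeroes them — prefer the zext
// form whenever the upper half is later observed:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn widen(lo: __m128) -> (__m256, __m256) {
    let upper_undef = _mm256_castps128_ps256(lo); // upper 4 lanes: arbitrary
    let upper_zero = _mm256_zextps128_ps256(lo); // upper 4 lanes: 0.0
    (upper_undef, upper_zero)
}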
/// Constructs a 256-bit integer vector from a 128-bit integer vector.
/// The lower 128 bits contain the value of the source vector. The upper
/// 128 bits are set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextsi128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_zextsi128_si256)
#[inline]
#[target_feature(enable = "avx,sse2")]
// This intrinsic is only used for compilation and does not generate any
@@ -2670,7 +2670,7 @@ pub unsafe fn _mm256_zextps128_ps256(a: __m128) -> __m256 {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_zextsi128_si256(a: __m128i) -> __m256i {
let b = _mm_setzero_si128().as_i64x2();
- let dst: i64x4 = simd_shuffle4!(a.as_i64x2(), b, [0, 1, 2, 3]);
+ let dst: i64x4 = simd_shuffle!(a.as_i64x2(), b, [0, 1, 2, 3]);
transmute(dst)
}
@@ -2679,19 +2679,19 @@ pub unsafe fn _mm256_zextsi128_si256(a: __m128i) -> __m256i {
/// contain the value of the source vector. The upper 128 bits are set
/// to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextpd128_pd256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_zextpd128_pd256)
#[inline]
#[target_feature(enable = "avx,sse2")]
// This intrinsic is only used for compilation and does not generate any
// instructions, thus it has zero latency.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d {
- simd_shuffle4!(a, _mm_setzero_pd(), [0, 1, 2, 3])
+ simd_shuffle!(a, _mm_setzero_pd(), [0, 1, 2, 3])
}
/// Returns vector of type `__m256` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_undefined_ps)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2702,7 +2702,7 @@ pub unsafe fn _mm256_undefined_ps() -> __m256 {
/// Returns vector of type `__m256d` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_undefined_pd)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2713,7 +2713,7 @@ pub unsafe fn _mm256_undefined_pd() -> __m256d {
/// Returns vector of type `__m256i` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_undefined_si256)
#[inline]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
@@ -2724,18 +2724,18 @@ pub unsafe fn _mm256_undefined_si256() -> __m256i {
/// Sets packed __m256 returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_m128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_m128)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vinsertf128))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_set_m128(hi: __m128, lo: __m128) -> __m256 {
- simd_shuffle8!(lo, hi, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(lo, hi, [0, 1, 2, 3, 4, 5, 6, 7])
}
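// Reviewer sketch (not part of this patch): as with the scalar set/setr
// pairs above, `_mm256_set_m128` takes the high half first while
// `_mm256_setr_m128` takes the low half first. A hedged example showing
// that the two spellings produce the same vector:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn join_halves(hi: __m128, lo: __m128) -> (__m256, __m256) {
    // Both results hold `lo` in bits 0..128 and `hi` in bits 128..256.
    (_mm256_set_m128(hi, lo), _mm256_setr_m128(lo, hi))
}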
/// Sets packed __m256d returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_m128d)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_m128d)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vinsertf128))]
@@ -2748,7 +2748,7 @@ pub unsafe fn _mm256_set_m128d(hi: __m128d, lo: __m128d) -> __m256d {
/// Sets packed __m256i returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_m128i)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_set_m128i)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vinsertf128))]
@@ -2761,7 +2761,7 @@ pub unsafe fn _mm256_set_m128i(hi: __m128i, lo: __m128i) -> __m256i {
/// Sets packed __m256 returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_m128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_m128)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vinsertf128))]
@@ -2772,7 +2772,7 @@ pub unsafe fn _mm256_setr_m128(lo: __m128, hi: __m128) -> __m256 {
/// Sets packed __m256d returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_m128d)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_m128d)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vinsertf128))]
@@ -2783,7 +2783,7 @@ pub unsafe fn _mm256_setr_m128d(lo: __m128d, hi: __m128d) -> __m256d {
/// Sets packed __m256i returned vector with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_m128i)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_setr_m128i)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vinsertf128))]
@@ -2797,7 +2797,7 @@ pub unsafe fn _mm256_setr_m128i(lo: __m128i, hi: __m128i) -> __m256i {
/// value.
/// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu2_m128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu2_m128)
#[inline]
#[target_feature(enable = "avx,sse")]
// This intrinsic has no corresponding instruction.
@@ -2812,7 +2812,7 @@ pub unsafe fn _mm256_loadu2_m128(hiaddr: *const f32, loaddr: *const f32) -> __m2
/// value.
/// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu2_m128d)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu2_m128d)
#[inline]
#[target_feature(enable = "avx,sse2")]
// This intrinsic has no corresponding instruction.
@@ -2826,7 +2826,7 @@ pub unsafe fn _mm256_loadu2_m128d(hiaddr: *const f64, loaddr: *const f64) -> __m
/// them into a 256-bit value.
/// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu2_m128i)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu2_m128i)
#[inline]
#[target_feature(enable = "avx,sse2")]
// This intrinsic has no corresponding instruction.
@@ -2841,7 +2841,7 @@ pub unsafe fn _mm256_loadu2_m128i(hiaddr: *const __m128i, loaddr: *const __m128i
/// different 128-bit locations.
/// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu2_m128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu2_m128)
#[inline]
#[target_feature(enable = "avx,sse")]
// This intrinsic has no corresponding instruction.
@@ -2858,7 +2858,7 @@ pub unsafe fn _mm256_storeu2_m128(hiaddr: *mut f32, loaddr: *mut f32, a: __m256)
/// different 128-bit locations.
/// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu2_m128d)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu2_m128d)
#[inline]
#[target_feature(enable = "avx,sse2")]
// This intrinsic has no corresponding instruction.
@@ -2874,7 +2874,7 @@ pub unsafe fn _mm256_storeu2_m128d(hiaddr: *mut f64, loaddr: *mut f64, a: __m256
/// `a` into memory at two different 128-bit locations.
/// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu2_m128i)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu2_m128i)
#[inline]
#[target_feature(enable = "avx,sse2")]
// This intrinsic has no corresponding instruction.
@@ -2888,7 +2888,7 @@ pub unsafe fn _mm256_storeu2_m128i(hiaddr: *mut __m128i, loaddr: *mut __m128i, a
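// Reviewer sketch (not part of this patch): the loadu2/storeu2 pairs above
// move the two 128-bit halves through independent, possibly unaligned
// addresses. A hedged round trip with illustrative buffers:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn copy_halves(hi: &[f32; 4], lo: &[f32; 4], out_hi: &mut [f32; 4], out_lo: &mut [f32; 4]) {
    let v = _mm256_loadu2_m128(hi.as_ptr(), lo.as_ptr());
    _mm256_storeu2_m128(out_hi.as_mut_ptr(), out_lo.as_mut_ptr(), v);
}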
/// Returns the first element of the input vector of `[8 x float]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtss_f32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtss_f32)
#[inline]
#[target_feature(enable = "avx")]
//#[cfg_attr(test, assert_instr(movss))] FIXME
diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs
index 8638b3136..5262628e1 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs
@@ -28,7 +28,7 @@ use stdarch_test::assert_instr;
/// Computes the absolute values of packed 32-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_abs_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -39,7 +39,7 @@ pub unsafe fn _mm256_abs_epi32(a: __m256i) -> __m256i {
/// Computes the absolute values of packed 16-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_abs_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -50,7 +50,7 @@ pub unsafe fn _mm256_abs_epi16(a: __m256i) -> __m256i {
/// Computes the absolute values of packed 8-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_abs_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -61,7 +61,7 @@ pub unsafe fn _mm256_abs_epi8(a: __m256i) -> __m256i {
/// Adds packed 64-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -72,7 +72,7 @@ pub unsafe fn _mm256_add_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed 32-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -83,7 +83,7 @@ pub unsafe fn _mm256_add_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed 16-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -94,7 +94,7 @@ pub unsafe fn _mm256_add_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed 8-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_add_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -105,7 +105,7 @@ pub unsafe fn _mm256_add_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed 8-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_adds_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_adds_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -116,7 +116,7 @@ pub unsafe fn _mm256_adds_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed 16-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_adds_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_adds_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -127,7 +127,7 @@ pub unsafe fn _mm256_adds_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed unsigned 8-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_adds_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_adds_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -138,7 +138,7 @@ pub unsafe fn _mm256_adds_epu8(a: __m256i, b: __m256i) -> __m256i {
/// Adds packed unsigned 16-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_adds_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_adds_epu16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -150,14 +150,14 @@ pub unsafe fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i {
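// Reviewer sketch (not part of this patch): the `adds` family above
// saturates instead of wrapping. With the illustrative value 120, adding
// 10 wraps to -126 under `_mm256_add_epi8` but clamps to 127 under
// `_mm256_adds_epi8`:

use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn wrap_vs_saturate() -> (i8, i8) {
    let a = _mm256_set1_epi8(120);
    let b = _mm256_set1_epi8(10);
    let mut wrapped = [0i8; 32];
    let mut clamped = [0i8; 32];
    _mm256_storeu_si256(wrapped.as_mut_ptr() as *mut __m256i, _mm256_add_epi8(a, b));
    _mm256_storeu_si256(clamped.as_mut_ptr() as *mut __m256i, _mm256_adds_epi8(a, b));
    (wrapped[0], clamped[0]) // (-126, 127)
}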
/// Concatenates pairs of 16-byte blocks in `a` and `b` into a 32-byte temporary
/// result, shifts the result right by `IMM8` bytes, and returns the low 16 bytes.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_alignr_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_alignr_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 7))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
// If palignr is shifting the pair of vectors more than the size of two
// lanes, emit zero.
if IMM8 > 32 {
@@ -175,7 +175,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
let b = b.as_i8x32();
let r: i8x32 = match IMM8 % 16 {
- 0 => simd_shuffle32!(
+ 0 => simd_shuffle!(
b,
a,
[
@@ -183,7 +183,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
23, 24, 25, 26, 27, 28, 29, 30, 31,
],
),
- 1 => simd_shuffle32!(
+ 1 => simd_shuffle!(
b,
a,
[
@@ -191,7 +191,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
24, 25, 26, 27, 28, 29, 30, 31, 48,
],
),
- 2 => simd_shuffle32!(
+ 2 => simd_shuffle!(
b,
a,
[
@@ -199,7 +199,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
25, 26, 27, 28, 29, 30, 31, 48, 49,
],
),
- 3 => simd_shuffle32!(
+ 3 => simd_shuffle!(
b,
a,
[
@@ -207,7 +207,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
25, 26, 27, 28, 29, 30, 31, 48, 49, 50,
],
),
- 4 => simd_shuffle32!(
+ 4 => simd_shuffle!(
b,
a,
[
@@ -215,7 +215,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
26, 27, 28, 29, 30, 31, 48, 49, 50, 51,
],
),
- 5 => simd_shuffle32!(
+ 5 => simd_shuffle!(
b,
a,
[
@@ -223,7 +223,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
27, 28, 29, 30, 31, 48, 49, 50, 51, 52,
],
),
- 6 => simd_shuffle32!(
+ 6 => simd_shuffle!(
b,
a,
[
@@ -231,7 +231,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
28, 29, 30, 31, 48, 49, 50, 51, 52, 53,
],
),
- 7 => simd_shuffle32!(
+ 7 => simd_shuffle!(
b,
a,
[
@@ -239,7 +239,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
28, 29, 30, 31, 48, 49, 50, 51, 52, 53, 54,
],
),
- 8 => simd_shuffle32!(
+ 8 => simd_shuffle!(
b,
a,
[
@@ -247,7 +247,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
29, 30, 31, 48, 49, 50, 51, 52, 53, 54, 55,
],
),
- 9 => simd_shuffle32!(
+ 9 => simd_shuffle!(
b,
a,
[
@@ -255,7 +255,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
30, 31, 48, 49, 50, 51, 52, 53, 54, 55, 56,
],
),
- 10 => simd_shuffle32!(
+ 10 => simd_shuffle!(
b,
a,
[
@@ -263,7 +263,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
31, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
],
),
- 11 => simd_shuffle32!(
+ 11 => simd_shuffle!(
b,
a,
[
@@ -271,7 +271,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
],
),
- 12 => simd_shuffle32!(
+ 12 => simd_shuffle!(
b,
a,
[
@@ -279,7 +279,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
],
),
- 13 => simd_shuffle32!(
+ 13 => simd_shuffle!(
b,
a,
[
@@ -287,7 +287,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
],
),
- 14 => simd_shuffle32!(
+ 14 => simd_shuffle!(
b,
a,
[
@@ -295,7 +295,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
],
),
- 15 => simd_shuffle32!(
+ 15 => simd_shuffle!(
b,
a,
[
@@ -311,7 +311,7 @@ pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Computes the bitwise AND of 256 bits (representing integer data)
/// in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_and_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_and_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vandps))]
@@ -323,7 +323,7 @@ pub unsafe fn _mm256_and_si256(a: __m256i, b: __m256i) -> __m256i {
/// Computes the bitwise NOT of 256 bits (representing integer data)
/// in `a` and then ANDs the result with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_andnot_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_andnot_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vandnps))]
@@ -338,7 +338,7 @@ pub unsafe fn _mm256_andnot_si256(a: __m256i, b: __m256i) -> __m256i {
/// Averages packed unsigned 16-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_avg_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_avg_epu16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -349,7 +349,7 @@ pub unsafe fn _mm256_avg_epu16(a: __m256i, b: __m256i) -> __m256i {
/// Averages packed unsigned 8-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_avg_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_avg_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -360,20 +360,20 @@ pub unsafe fn _mm256_avg_epu8(a: __m256i, b: __m256i) -> __m256i {
/// Blends packed 32-bit integers from `a` and `b` using control mask `IMM4`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vblendps, IMM4 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_epi32<const IMM4: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
let a = a.as_i32x4();
let b = b.as_i32x4();
- let r: i32x4 = simd_shuffle4!(
+ let r: i32x4 = simd_shuffle!(
a,
b,
- <const IMM4: i32> [
+ [
[0, 4, 0, 4][IMM4 as usize & 0b11],
[1, 1, 5, 5][IMM4 as usize & 0b11],
[2, 6, 2, 6][(IMM4 as usize >> 2) & 0b11],
@@ -385,20 +385,20 @@ pub unsafe fn _mm_blend_epi32<const IMM4: i32>(a: __m128i, b: __m128i) -> __m128
/// Blends packed 32-bit integers from `a` and `b` using control mask `IMM8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blend_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blend_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vblendps, IMM8 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blend_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let b = b.as_i32x8();
- let r: i32x8 = simd_shuffle8!(
+ let r: i32x8 = simd_shuffle!(
a,
b,
- <const IMM8: i32> [
+ [
[0, 8, 0, 8][IMM8 as usize & 0b11],
[1, 1, 9, 9][IMM8 as usize & 0b11],
[2, 10, 2, 10][(IMM8 as usize >> 2) & 0b11],
@@ -414,21 +414,21 @@ pub unsafe fn _mm256_blend_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Blends packed 16-bit integers from `a` and `b` using control mask `IMM8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blend_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blend_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpblendw, IMM8 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blend_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x16();
let b = b.as_i16x16();
- let r: i16x16 = simd_shuffle16!(
+ let r: i16x16 = simd_shuffle!(
a,
b,
- <const IMM8: i32> [
+ [
[0, 16, 0, 16][IMM8 as usize & 0b11],
[1, 1, 17, 17][IMM8 as usize & 0b11],
[2, 18, 2, 18][(IMM8 as usize >> 2) & 0b11],
@@ -452,7 +452,7 @@ pub unsafe fn _mm256_blend_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
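// Reviewer sketch (not part of this patch): in the immediate-mask blends
// above, bit i of the immediate selects lane i — 0 keeps `a`, 1 takes `b`.
// With the illustrative mask 0b1111_0000, the low four 32-bit lanes come
// from `a` and the high four from `b`:

use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn splice(a: __m256i, b: __m256i) -> __m256i {
    _mm256_blend_epi32::<0b1111_0000>(a, b)
}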
/// Blends packed 8-bit integers from `a` and `b` using `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blendv_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_blendv_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpblendvb))]
@@ -464,28 +464,28 @@ pub unsafe fn _mm256_blendv_epi8(a: __m256i, b: __m256i, mask: __m256i) -> __m25
/// Broadcasts the low packed 8-bit integer from `a` to all elements of
/// the 128-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastb_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastb_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_broadcastb_epi8(a: __m128i) -> __m128i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle16!(a.as_i8x16(), zero.as_i8x16(), [0_u32; 16]);
+ let ret = simd_shuffle!(a.as_i8x16(), zero.as_i8x16(), [0_u32; 16]);
transmute::<i8x16, _>(ret)
}
/// Broadcasts the low packed 8-bit integer from `a` to all elements of
/// the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastb_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastb_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastb_epi8(a: __m128i) -> __m256i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle32!(a.as_i8x16(), zero.as_i8x16(), [0_u32; 32]);
+ let ret = simd_shuffle!(a.as_i8x16(), zero.as_i8x16(), [0_u32; 32]);
transmute::<i8x32, _>(ret)
}
@@ -494,14 +494,14 @@ pub unsafe fn _mm256_broadcastb_epi8(a: __m128i) -> __m256i {
/// Broadcasts the low packed 32-bit integer from `a` to all elements of
/// the 128-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastd_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_broadcastd_epi32(a: __m128i) -> __m128i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle4!(a.as_i32x4(), zero.as_i32x4(), [0_u32; 4]);
+ let ret = simd_shuffle!(a.as_i32x4(), zero.as_i32x4(), [0_u32; 4]);
transmute::<i32x4, _>(ret)
}
@@ -510,66 +510,67 @@ pub unsafe fn _mm_broadcastd_epi32(a: __m128i) -> __m128i {
/// Broadcasts the low packed 32-bit integer from `a` to all elements of
/// the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastd_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastd_epi32(a: __m128i) -> __m256i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle8!(a.as_i32x4(), zero.as_i32x4(), [0_u32; 8]);
+ let ret = simd_shuffle!(a.as_i32x4(), zero.as_i32x4(), [0_u32; 8]);
transmute::<i32x8, _>(ret)
}
/// Broadcasts the low packed 64-bit integer from `a` to all elements of
/// the 128-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastq_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastq_epi64)
#[inline]
#[target_feature(enable = "avx2")]
-// FIXME: https://github.com/rust-lang/stdarch/issues/791
+// Emits `vmovddup` instead of `vpbroadcastq`
+// See https://github.com/rust-lang/stdarch/issues/791
#[cfg_attr(test, assert_instr(vmovddup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_broadcastq_epi64(a: __m128i) -> __m128i {
- let ret = simd_shuffle2!(a.as_i64x2(), a.as_i64x2(), [0_u32; 2]);
+ let ret = simd_shuffle!(a.as_i64x2(), a.as_i64x2(), [0_u32; 2]);
transmute::<i64x2, _>(ret)
}
/// Broadcasts the low packed 64-bit integer from `a` to all elements of
/// the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastq_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastq_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastq_epi64(a: __m128i) -> __m256i {
- let ret = simd_shuffle4!(a.as_i64x2(), a.as_i64x2(), [0_u32; 4]);
+ let ret = simd_shuffle!(a.as_i64x2(), a.as_i64x2(), [0_u32; 4]);
transmute::<i64x4, _>(ret)
}
/// Broadcasts the low double-precision (64-bit) floating-point element
/// from `a` to all elements of the 128-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastsd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastsd_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vmovddup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_broadcastsd_pd(a: __m128d) -> __m128d {
- simd_shuffle2!(a, _mm_setzero_pd(), [0_u32; 2])
+ simd_shuffle!(a, _mm_setzero_pd(), [0_u32; 2])
}
/// Broadcasts the low double-precision (64-bit) floating-point element
/// from `a` to all elements of the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastsd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastsd_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastsd_pd(a: __m128d) -> __m256d {
- simd_shuffle4!(a, _mm_setzero_pd(), [0_u32; 4])
+ simd_shuffle!(a, _mm_setzero_pd(), [0_u32; 4])
}
// N.B., `broadcastsi128_si256` is often compiled to `vinsertf128` or
@@ -577,71 +578,71 @@ pub unsafe fn _mm256_broadcastsd_pd(a: __m128d) -> __m256d {
/// Broadcasts 128 bits of integer data from `a` to all 128-bit lanes in
/// the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastsi128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastsi128_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastsi128_si256(a: __m128i) -> __m256i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle4!(a.as_i64x2(), zero.as_i64x2(), [0, 1, 0, 1]);
+ let ret = simd_shuffle!(a.as_i64x2(), zero.as_i64x2(), [0, 1, 0, 1]);
transmute::<i64x4, _>(ret)
}
/// Broadcasts the low single-precision (32-bit) floating-point element
/// from `a` to all elements of the 128-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastss_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastss_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_broadcastss_ps(a: __m128) -> __m128 {
- simd_shuffle4!(a, _mm_setzero_ps(), [0_u32; 4])
+ simd_shuffle!(a, _mm_setzero_ps(), [0_u32; 4])
}
/// Broadcasts the low single-precision (32-bit) floating-point element
/// from `a` to all elements of the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastss_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastss_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastss_ps(a: __m128) -> __m256 {
- simd_shuffle8!(a, _mm_setzero_ps(), [0_u32; 8])
+ simd_shuffle!(a, _mm_setzero_ps(), [0_u32; 8])
}
/// Broadcasts the low packed 16-bit integer from `a` to all elements of
/// the 128-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastw_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastw_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_broadcastw_epi16(a: __m128i) -> __m128i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle8!(a.as_i16x8(), zero.as_i16x8(), [0_u32; 8]);
+ let ret = simd_shuffle!(a.as_i16x8(), zero.as_i16x8(), [0_u32; 8]);
transmute::<i16x8, _>(ret)
}
/// Broadcasts the low packed 16-bit integer from `a` to all elements of
/// the 256-bit returned value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastw_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastw_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_broadcastw_epi16(a: __m128i) -> __m256i {
let zero = _mm_setzero_si128();
- let ret = simd_shuffle16!(a.as_i16x8(), zero.as_i16x8(), [0_u32; 16]);
+ let ret = simd_shuffle!(a.as_i16x8(), zero.as_i16x8(), [0_u32; 16]);
transmute::<i16x16, _>(ret)
}
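// Reviewer sketch (not part of this patch): the broadcast intrinsics splat
// lane 0 of a vector source; when the value starts in a scalar, the
// corresponding `_mm256_set1_*` intrinsic is usually the more direct
// spelling. A hedged example:

use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn splat_low_lane(a: __m128i) -> __m256i {
    // All eight 32-bit lanes of the result equal lane 0 of `a`.
    _mm256_broadcastd_epi32(a)
}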
/// Compares packed 64-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpeqq))]
@@ -652,7 +653,7 @@ pub unsafe fn _mm256_cmpeq_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 32-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpeqd))]
@@ -663,7 +664,7 @@ pub unsafe fn _mm256_cmpeq_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 16-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpeqw))]
@@ -674,7 +675,7 @@ pub unsafe fn _mm256_cmpeq_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 8-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpeqb))]
@@ -685,7 +686,7 @@ pub unsafe fn _mm256_cmpeq_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 64-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpgtq))]
@@ -696,7 +697,7 @@ pub unsafe fn _mm256_cmpgt_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 32-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpgtd))]
@@ -707,7 +708,7 @@ pub unsafe fn _mm256_cmpgt_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 16-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpgtw))]
@@ -718,7 +719,7 @@ pub unsafe fn _mm256_cmpgt_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 8-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpcmpgtb))]
@@ -729,7 +730,7 @@ pub unsafe fn _mm256_cmpgt_epi8(a: __m256i, b: __m256i) -> __m256i {
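// Reviewer sketch (not part of this patch): the compare intrinsics above
// return all-ones / all-zeros lane masks, which compose directly with
// `_mm256_blendv_epi8`. A hedged per-byte maximum built from the two
// (std::arch also offers `_mm256_max_epi8` directly):

use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn max_per_byte(a: __m256i, b: __m256i) -> __m256i {
    let a_gt_b = _mm256_cmpgt_epi8(a, b); // 0xFF where a > b, else 0x00
    _mm256_blendv_epi8(b, a, a_gt_b) // pick `a` where the mask is set
}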
/// Sign-extend 16-bit integers to 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi16_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -740,20 +741,20 @@ pub unsafe fn _mm256_cvtepi16_epi32(a: __m128i) -> __m256i {
/// Sign-extend 16-bit integers to 64-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi16_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepi16_epi64(a: __m128i) -> __m256i {
let a = a.as_i16x8();
- let v64: i16x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let v64: i16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute::<i64x4, _>(simd_cast(v64))
}
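// Reviewer sketch (not part of this patch): only the low four 16-bit lanes
// feed the widening above; the shuffle discards lanes 4..=7 before the
// cast. Illustrative values, lanes listed high-to-low in `_mm_set_epi16`:

use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn widen_low_four() -> [i64; 4] {
    let a = _mm_set_epi16(7, 6, 5, 4, -3, 2, -1, 0);
    let mut out = [0i64; 4];
    _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, _mm256_cvtepi16_epi64(a));
    out // [0, -1, 2, -3]: sign bits survive the widening
}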
/// Sign-extend 32-bit integers to 64-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -764,7 +765,7 @@ pub unsafe fn _mm256_cvtepi32_epi64(a: __m128i) -> __m256i {
/// Sign-extend 8-bit integers to 16-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi8_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi8_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -775,34 +776,34 @@ pub unsafe fn _mm256_cvtepi8_epi16(a: __m128i) -> __m256i {
/// Sign-extend 8-bit integers to 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi8_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi8_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepi8_epi32(a: __m128i) -> __m256i {
let a = a.as_i8x16();
- let v64: i8x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let v64: i8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute::<i32x8, _>(simd_cast(v64))
}
/// Sign-extend 8-bit integers to 64-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi8_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi8_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepi8_epi64(a: __m128i) -> __m256i {
let a = a.as_i8x16();
- let v32: i8x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let v32: i8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute::<i64x4, _>(simd_cast(v32))
}
/// Zero-extend packed unsigned 16-bit integers in `a` to packed 32-bit
/// integers, and store the results in `dst`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu16_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu16_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -814,20 +815,20 @@ pub unsafe fn _mm256_cvtepu16_epi32(a: __m128i) -> __m256i {
/// Zero-extend the lower four unsigned 16-bit integers in `a` to 64-bit
/// integers. The upper four elements of `a` are unused.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu16_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu16_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepu16_epi64(a: __m128i) -> __m256i {
let a = a.as_u16x8();
- let v64: u16x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let v64: u16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute::<i64x4, _>(simd_cast(v64))
}
/// Zero-extend unsigned 32-bit integers in `a` to 64-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu32_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu32_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -838,7 +839,7 @@ pub unsafe fn _mm256_cvtepu32_epi64(a: __m128i) -> __m256i {
/// Zero-extend unsigned 8-bit integers in `a` to 16-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu8_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu8_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -850,34 +851,34 @@ pub unsafe fn _mm256_cvtepu8_epi16(a: __m128i) -> __m256i {
/// Zero-extend the lower eight unsigned 8-bit integers in `a` to 32-bit
/// integers. The upper eight elements of `a` are unused.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu8_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu8_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovzxbd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepu8_epi32(a: __m128i) -> __m256i {
let a = a.as_u8x16();
- let v64: u8x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let v64: u8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute::<i32x8, _>(simd_cast(v64))
}
/// Zero-extend the lower four unsigned 8-bit integers in `a` to 64-bit
/// integers. The upper twelve elements of `a` are unused.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu8_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu8_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i {
let a = a.as_u8x16();
- let v32: u8x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let v32: u8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute::<i64x4, _>(simd_cast(v32))
}
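Zero- and sign-extension differ only on inputs with the high bit set; a sketch contrasting the two on the same vector (hypothetical `extend_demo` helper):
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn extend_demo() {
    // Hypothetical helper; e0 = -1 is the byte 0xFF.
    let a = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 3, 2, -1);
    let (mut z, mut s) = ([0i64; 4], [0i64; 4]);
    _mm256_storeu_si256(z.as_mut_ptr() as *mut __m256i, _mm256_cvtepu8_epi64(a));
    _mm256_storeu_si256(s.as_mut_ptr() as *mut __m256i, _mm256_cvtepi8_epi64(a));
    assert_eq!(z, [255, 2, 3, 4]); // zero-extended: 0xFF -> 255
    assert_eq!(s, [-1, 2, 3, 4]); // sign-extended: 0xFF -> -1
}
```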
/// Extracts 128 bits (of integer data) from `a` selected with `IMM1`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extracti128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extracti128_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(
@@ -887,16 +888,16 @@ pub unsafe fn _mm256_cvtepu8_epi64(a: __m128i) -> __m256i {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extracti128_si256<const IMM1: i32>(a: __m256i) -> __m128i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let a = a.as_i64x4();
let b = _mm256_undefined_si256().as_i64x4();
- let dst: i64x2 = simd_shuffle2!(a, b, <const IMM1: i32> [[0, 1], [2, 3]][IMM1 as usize]);
+ let dst: i64x2 = simd_shuffle!(a, b, [[0, 1], [2, 3]][IMM1 as usize]);
transmute(dst)
}
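`static_assert_uimm_bits!(IMM1, 1)` replaces the family of width-specific `static_assert_immN!` macros: it rejects at compile time any immediate that does not fit in the given number of unsigned bits. A roughly equivalent check (hypothetical `assert_uimm_bits`; the real macro is internal to stdarch):
```rust
// Hypothetical stand-in for static_assert_uimm_bits!(imm, bits):
// the immediate must lie in 0..(1 << bits).
const fn assert_uimm_bits(imm: i32, bits: u32) {
    assert!(imm >= 0 && (imm as u64) < (1u64 << bits));
}

const _: () = assert_uimm_bits(1, 1); // ok: IMM1 may be 0 or 1
// const _: () = assert_uimm_bits(2, 1); // would fail to compile
```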
/// Horizontally adds adjacent pairs of 16-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadd_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hadd_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vphaddw))]
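A usage sketch for the horizontal add (hypothetical `hadd_demo` helper): within each 128-bit lane, the four pair-sums of `a` fill the lower half and the four pair-sums of `b` the upper half.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn hadd_demo() {
    // Hypothetical helper, not part of this diff.
    let a = _mm256_set1_epi16(1);
    let b = _mm256_set1_epi16(3);
    let r = _mm256_hadd_epi16(a, b);
    let mut out = [0i16; 16];
    _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
    assert_eq!(&out[..8], &[2, 2, 2, 2, 6, 6, 6, 6]); // lane 0: a-pairs, then b-pairs
}
```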
@@ -907,7 +908,7 @@ pub unsafe fn _mm256_hadd_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Horizontally adds adjacent pairs of 32-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hadd_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vphaddd))]
@@ -919,7 +920,7 @@ pub unsafe fn _mm256_hadd_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Horizontally adds adjacent pairs of 16-bit integers in `a` and `b`
/// using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadds_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hadds_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vphaddsw))]
@@ -930,7 +931,7 @@ pub unsafe fn _mm256_hadds_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Horizontally subtract adjacent pairs of 16-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsub_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hsub_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vphsubw))]
@@ -941,7 +942,7 @@ pub unsafe fn _mm256_hsub_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Horizontally subtract adjacent pairs of 32-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsub_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hsub_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vphsubd))]
@@ -953,7 +954,7 @@ pub unsafe fn _mm256_hsub_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Horizontally subtract adjacent pairs of 16-bit integers in `a` and `b`
/// using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsubs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_hsubs_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vphsubsw))]
@@ -966,7 +967,7 @@ pub unsafe fn _mm256_hsubs_epi16(a: __m256i, b: __m256i) -> __m256i {
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i32gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i32gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))]
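A gather usage sketch (hypothetical `gather_demo` helper): `offsets` holds element indices and `SCALE` is the element stride in bytes, so `SCALE = 4` for `i32` data.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn gather_demo() {
    // Hypothetical helper, not part of this diff.
    let data: [i32; 8] = [10, 11, 12, 13, 14, 15, 16, 17];
    let offsets = _mm_set_epi32(7, 5, 3, 1); // lane order is [1, 3, 5, 7]
    let r = _mm_i32gather_epi32::<4>(data.as_ptr(), offsets);
    let mut out = [0i32; 4];
    _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, r);
    assert_eq!(out, [11, 13, 15, 17]);
}
```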
@@ -990,7 +991,7 @@ pub unsafe fn _mm_i32gather_epi32<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i32gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i32gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))]
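To make the mask semantics concrete, a sketch (hypothetical `masked_gather_demo` helper): lanes whose mask element has its top bit set are gathered from memory, the rest are copied from `src`.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn masked_gather_demo() {
    // Hypothetical helper, not part of this diff.
    let data: [i32; 4] = [10, 20, 30, 40];
    let offsets = _mm_set_epi32(3, 2, 1, 0);
    let src = _mm_set1_epi32(-1);
    let mask = _mm_set_epi32(0, -1, 0, -1); // gather lanes 0 and 2 only
    let r = _mm_mask_i32gather_epi32::<4>(src, data.as_ptr(), offsets, mask);
    let mut out = [0i32; 4];
    _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, r);
    assert_eq!(out, [10, -1, 30, -1]);
}
```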
@@ -1015,7 +1016,7 @@ pub unsafe fn _mm_mask_i32gather_epi32<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i32gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i32gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))]
@@ -1039,7 +1040,7 @@ pub unsafe fn _mm256_i32gather_epi32<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i32gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i32gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))]
@@ -1064,7 +1065,7 @@ pub unsafe fn _mm256_mask_i32gather_epi32<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i32gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i32gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))]
@@ -1084,7 +1085,7 @@ pub unsafe fn _mm_i32gather_ps<const SCALE: i32>(slice: *const f32, offsets: __m
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i32gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i32gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))]
@@ -1106,7 +1107,7 @@ pub unsafe fn _mm_mask_i32gather_ps<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i32gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i32gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))]
@@ -1126,7 +1127,7 @@ pub unsafe fn _mm256_i32gather_ps<const SCALE: i32>(slice: *const f32, offsets:
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i32gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i32gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))]
@@ -1148,7 +1149,7 @@ pub unsafe fn _mm256_mask_i32gather_ps<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i32gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i32gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))]
@@ -1172,7 +1173,7 @@ pub unsafe fn _mm_i32gather_epi64<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i32gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i32gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))]
@@ -1197,7 +1198,7 @@ pub unsafe fn _mm_mask_i32gather_epi64<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i32gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i32gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))]
@@ -1221,7 +1222,7 @@ pub unsafe fn _mm256_i32gather_epi64<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i32gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i32gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))]
@@ -1246,7 +1247,7 @@ pub unsafe fn _mm256_mask_i32gather_epi64<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i32gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i32gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))]
@@ -1266,7 +1267,7 @@ pub unsafe fn _mm_i32gather_pd<const SCALE: i32>(slice: *const f64, offsets: __m
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i32gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i32gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))]
@@ -1288,7 +1289,7 @@ pub unsafe fn _mm_mask_i32gather_pd<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i32gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i32gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))]
@@ -1311,7 +1312,7 @@ pub unsafe fn _mm256_i32gather_pd<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i32gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i32gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))]
@@ -1333,7 +1334,7 @@ pub unsafe fn _mm256_mask_i32gather_pd<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i64gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i64gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))]
@@ -1357,7 +1358,7 @@ pub unsafe fn _mm_i64gather_epi32<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i64gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i64gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))]
@@ -1382,7 +1383,7 @@ pub unsafe fn _mm_mask_i64gather_epi32<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i64gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i64gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))]
@@ -1406,7 +1407,7 @@ pub unsafe fn _mm256_i64gather_epi32<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i64gather_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i64gather_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))]
@@ -1431,7 +1432,7 @@ pub unsafe fn _mm256_mask_i64gather_epi32<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i64gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i64gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
@@ -1451,7 +1452,7 @@ pub unsafe fn _mm_i64gather_ps<const SCALE: i32>(slice: *const f32, offsets: __m
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i64gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i64gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
@@ -1473,7 +1474,7 @@ pub unsafe fn _mm_mask_i64gather_ps<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i64gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i64gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
@@ -1493,7 +1494,7 @@ pub unsafe fn _mm256_i64gather_ps<const SCALE: i32>(slice: *const f32, offsets:
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i64gather_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i64gather_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
@@ -1515,7 +1516,7 @@ pub unsafe fn _mm256_mask_i64gather_ps<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i64gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i64gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))]
@@ -1539,7 +1540,7 @@ pub unsafe fn _mm_i64gather_epi64<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i64gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i64gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))]
@@ -1564,7 +1565,7 @@ pub unsafe fn _mm_mask_i64gather_epi64<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i64gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i64gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))]
@@ -1588,7 +1589,7 @@ pub unsafe fn _mm256_i64gather_epi64<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i64gather_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i64gather_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))]
@@ -1613,7 +1614,7 @@ pub unsafe fn _mm256_mask_i64gather_epi64<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_i64gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_i64gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
@@ -1633,7 +1634,7 @@ pub unsafe fn _mm_i64gather_pd<const SCALE: i32>(slice: *const f64, offsets: __m
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_i64gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_i64gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
@@ -1655,7 +1656,7 @@ pub unsafe fn _mm_mask_i64gather_pd<const SCALE: i32>(
/// where
/// `scale` should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_i64gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_i64gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
@@ -1678,7 +1679,7 @@ pub unsafe fn _mm256_i64gather_pd<const SCALE: i32>(
/// `scale` should be 1, 2, 4 or 8. Lanes whose corresponding element in `mask`
/// has its highest bit clear are taken from `src` instead.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_i64gather_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_i64gather_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
@@ -1699,7 +1700,7 @@ pub unsafe fn _mm256_mask_i64gather_pd<const SCALE: i32>(
/// Copies `a` to `dst`, then inserts 128 bits (of integer data) from `b` at the
/// location specified by `IMM1`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_inserti128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_inserti128_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(
@@ -1709,11 +1710,10 @@ pub unsafe fn _mm256_mask_i64gather_pd<const SCALE: i32>(
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_inserti128_si256<const IMM1: i32>(a: __m256i, b: __m128i) -> __m256i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let a = a.as_i64x4();
let b = _mm256_castsi128_si256(b).as_i64x4();
- let dst: i64x4 =
- simd_shuffle4!(a, b, <const IMM1: i32> [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize]);
+ let dst: i64x4 = simd_shuffle!(a, b, [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize]);
transmute(dst)
}
@@ -1721,7 +1721,7 @@ pub unsafe fn _mm256_inserti128_si256<const IMM1: i32>(a: __m256i, b: __m128i) -
/// intermediate signed 32-bit integers. Horizontally add adjacent pairs
/// of intermediate 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_madd_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_madd_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -1735,7 +1735,7 @@ pub unsafe fn _mm256_madd_epi16(a: __m256i, b: __m256i) -> __m256i {
/// signed 16-bit integers. Horizontally add adjacent pairs of intermediate
/// signed 16-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maddubs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maddubs_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -1748,7 +1748,7 @@ pub unsafe fn _mm256_maddubs_epi16(a: __m256i, b: __m256i) -> __m256i {
/// (elements are zeroed out when the highest bit is not set in the
/// corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskload_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskload_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovd))]
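A masked-load sketch (hypothetical `maskload_demo` helper); Intel documents that masked-off elements do not fault, which is what makes these loads useful at buffer tails.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn maskload_demo() {
    // Hypothetical helper, not part of this diff.
    let data: [i32; 4] = [1, 2, 3, 4];
    let mask = _mm_set_epi32(0, -1, 0, -1); // top bit set => load that lane
    let r = _mm_maskload_epi32(data.as_ptr(), mask);
    let mut out = [0i32; 4];
    _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, r);
    assert_eq!(out, [1, 0, 3, 0]); // masked-off lanes read as zero
}
```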
@@ -1761,7 +1761,7 @@ pub unsafe fn _mm_maskload_epi32(mem_addr: *const i32, mask: __m128i) -> __m128i
/// (elements are zeroed out when the highest bit is not set in the
/// corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskload_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskload_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovd))]
@@ -1774,7 +1774,7 @@ pub unsafe fn _mm256_maskload_epi32(mem_addr: *const i32, mask: __m256i) -> __m2
/// (elements are zeroed out when the highest bit is not set in the
/// corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskload_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskload_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovq))]
@@ -1787,7 +1787,7 @@ pub unsafe fn _mm_maskload_epi64(mem_addr: *const i64, mask: __m128i) -> __m128i
/// (elements are zeroed out when the highest bit is not set in the
/// corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskload_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskload_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovq))]
@@ -1800,7 +1800,7 @@ pub unsafe fn _mm256_maskload_epi64(mem_addr: *const i64, mask: __m256i) -> __m2
/// using `mask` (elements are not stored when the highest bit is not set
/// in the corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskstore_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskstore_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovd))]
@@ -1813,7 +1813,7 @@ pub unsafe fn _mm_maskstore_epi32(mem_addr: *mut i32, mask: __m128i, a: __m128i)
/// using `mask` (elements are not stored when the highest bit is not set
/// in the corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskstore_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskstore_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovd))]
@@ -1826,7 +1826,7 @@ pub unsafe fn _mm256_maskstore_epi32(mem_addr: *mut i32, mask: __m256i, a: __m25
/// using `mask` (elements are not stored when the highest bit is not set
/// in the corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskstore_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskstore_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovq))]
@@ -1839,7 +1839,7 @@ pub unsafe fn _mm_maskstore_epi64(mem_addr: *mut i64, mask: __m128i, a: __m128i)
/// using `mask` (elements are not stored when the highest bit is not set
/// in the corresponding element).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskstore_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskstore_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaskmovq))]
@@ -1851,7 +1851,7 @@ pub unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m25
/// Compares packed 16-bit integers in `a` and `b`, and returns the packed
/// maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1865,7 +1865,7 @@ pub unsafe fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 32-bit integers in `a` and `b`, and returns the packed
/// maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1879,7 +1879,7 @@ pub unsafe fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 8-bit integers in `a` and `b`, and returns the packed
/// maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1893,7 +1893,7 @@ pub unsafe fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed unsigned 16-bit integers in `a` and `b`, and returns
/// the packed maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epu16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1907,7 +1907,7 @@ pub unsafe fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed unsigned 32-bit integers in `a` and `b`, and returns
/// the packed maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epu32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -1921,7 +1921,7 @@ pub unsafe fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed unsigned 8-bit integers in `a` and `b`, and returns
/// the packed maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1935,7 +1935,7 @@ pub unsafe fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 16-bit integers in `a` and `b`, and returns the packed
/// minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -1949,7 +1949,7 @@ pub unsafe fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 32-bit integers in `a` and `b`, and returns the packed
/// minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -1963,7 +1963,7 @@ pub unsafe fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed 8-bit integers in `a` and `b`, and returns the packed
/// minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -1977,7 +1977,7 @@ pub unsafe fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed unsigned 16-bit integers in `a` and `b`, and returns
/// the packed minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epu16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -1991,7 +1991,7 @@ pub unsafe fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed unsigned 32-bit integers in `a` and `b`, and returns
/// the packed minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epu32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2005,7 +2005,7 @@ pub unsafe fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i {
/// Compares packed unsigned 8-bit integers in `a` and `b`, and returns
/// the packed minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2019,7 +2019,7 @@ pub unsafe fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i {
/// Creates a mask from the most significant bit of each 8-bit element in `a`,
/// returning the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movemask_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movemask_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmovmskb))]
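A movemask sketch (hypothetical `movemask_demo` helper): with the sign bit set in all 32 bytes, all 32 result bits are set, which as an `i32` is -1.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn movemask_demo() {
    // Hypothetical helper, not part of this diff.
    assert_eq!(_mm256_movemask_epi8(_mm256_set1_epi8(-1)), -1);
    assert_eq!(_mm256_movemask_epi8(_mm256_setzero_si256()), 0);
}
```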
@@ -2038,14 +2038,14 @@ pub unsafe fn _mm256_movemask_epi8(a: __m256i) -> i32 {
/// quadruplets are formed from sequential 8-bit integers selected from `a`
/// starting at the offset specified in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mpsadbw_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mpsadbw_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vmpsadbw, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mpsadbw_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(mpsadbw(a.as_u8x32(), b.as_u8x32(), IMM8))
}
@@ -2054,7 +2054,7 @@ pub unsafe fn _mm256_mpsadbw_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __
///
/// Returns the 64-bit results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mul_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -2068,7 +2068,7 @@ pub unsafe fn _mm256_mul_epi32(a: __m256i, b: __m256i) -> __m256i {
///
/// Returns the unsigned 64-bit results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mul_epu32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -2081,7 +2081,7 @@ pub unsafe fn _mm256_mul_epu32(a: __m256i, b: __m256i) -> __m256i {
/// intermediate 32-bit integers and returning the high 16 bits of the
/// intermediate integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mulhi_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mulhi_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -2094,7 +2094,7 @@ pub unsafe fn _mm256_mulhi_epi16(a: __m256i, b: __m256i) -> __m256i {
/// intermediate 32-bit integers and returning the high 16 bits of the
/// intermediate integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mulhi_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mulhi_epu16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -2107,7 +2107,7 @@ pub unsafe fn _mm256_mulhi_epu16(a: __m256i, b: __m256i) -> __m256i {
/// intermediate 32-bit integers, and returns the low 16 bits of the
/// intermediate integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mullo_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mullo_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -2120,7 +2120,7 @@ pub unsafe fn _mm256_mullo_epi16(a: __m256i, b: __m256i) -> __m256i {
/// intermediate 64-bit integers, and returns the low 32 bits of the
/// intermediate integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mullo_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mullo_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -2134,7 +2134,7 @@ pub unsafe fn _mm256_mullo_epi32(a: __m256i, b: __m256i) -> __m256i {
/// integer to the 18 most significant bits, round by adding 1, and
/// return bits `[16:1]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mulhrs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mulhrs_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
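In fixed-point terms the description above is `(a * b + 0x4000) >> 15` per lane, i.e. a rounding Q15 multiply. A sketch (hypothetical `mulhrs_demo` helper):
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn mulhrs_demo() {
    // Hypothetical helper: 0.5 * 0.25 = 0.125 in Q15.
    let a = _mm256_set1_epi16(0x4000); // 0.5
    let b = _mm256_set1_epi16(0x2000); // 0.25
    let r = _mm256_mulhrs_epi16(a, b);
    let mut out = [0i16; 16];
    _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
    assert_eq!(out[0], 0x1000); // 0.125
}
```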
@@ -2146,7 +2146,7 @@ pub unsafe fn _mm256_mulhrs_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Computes the bitwise OR of 256 bits (representing integer data) in `a`
/// and `b`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vorps))]
@@ -2158,7 +2158,7 @@ pub unsafe fn _mm256_or_si256(a: __m256i, b: __m256i) -> __m256i {
/// Converts packed 16-bit integers from `a` and `b` to packed 8-bit integers
/// using signed saturation
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_packs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_packs_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -2170,7 +2170,7 @@ pub unsafe fn _mm256_packs_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Converts packed 32-bit integers from `a` and `b` to packed 16-bit integers
/// using signed saturation
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_packs_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_packs_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -2182,7 +2182,7 @@ pub unsafe fn _mm256_packs_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Converts packed 16-bit integers from `a` and `b` to packed 8-bit integers
/// using unsigned saturation
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_packus_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_packus_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -2194,7 +2194,7 @@ pub unsafe fn _mm256_packus_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Converts packed 32-bit integers from `a` and `b` to packed 16-bit integers
/// using unsigned saturation
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_packus_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_packus_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpackusdw))]
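A saturation sketch (hypothetical `packus_demo` helper): unsigned saturation clamps each 32-bit input into `0..=65535` before packing.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn packus_demo() {
    // Hypothetical helper, not part of this diff.
    let a = _mm256_set1_epi32(-7);
    let b = _mm256_set1_epi32(70_000);
    let r = _mm256_packus_epi32(a, b);
    let mut out = [0u16; 16];
    _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
    // per 128-bit lane: four clamped values from `a`, then four from `b`
    assert_eq!(&out[..8], &[0, 0, 0, 0, 65535, 65535, 65535, 65535]);
}
```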
@@ -2208,7 +2208,7 @@ pub unsafe fn _mm256_packus_epi32(a: __m256i, b: __m256i) -> __m256i {
/// The last 3 bits of each integer of `b` are used as addresses into the 8
/// integers of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutevar8x32_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutevar8x32_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -2219,19 +2219,19 @@ pub unsafe fn _mm256_permutevar8x32_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Permutes 64-bit integers from `a` using control mask `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute4x64_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute4x64_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpermpd, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute4x64_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let zero = _mm256_setzero_si256().as_i64x4();
- let r: i64x4 = simd_shuffle4!(
+ let r: i64x4 = simd_shuffle!(
a.as_i64x4(),
zero,
- <const IMM8: i32> [
+ [
IMM8 as u32 & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -2243,32 +2243,32 @@ pub unsafe fn _mm256_permute4x64_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
/// Shuffles 128-bits of integer data selected by `imm8` from `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2x128_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute2x128_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute2x128_si256<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vperm2i128(a.as_i64x4(), b.as_i64x4(), IMM8 as i8))
}
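The immediate picks one 128-bit source half per output half: bits 1:0 select the low half and bits 5:4 the high half (0/1 = low/high half of `a`, 2/3 = low/high half of `b`), while bits 3 and 7 zero the corresponding half. A sketch (hypothetical `permute2x128_demo` helper):
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn permute2x128_demo() {
    // Hypothetical helper, not part of this diff.
    let a = _mm256_set_epi64x(3, 2, 1, 0); // lanes [0, 1, 2, 3]
    let b = _mm256_set_epi64x(7, 6, 5, 4); // lanes [4, 5, 6, 7]
    let r = _mm256_permute2x128_si256::<0x20>(a, b); // low: a.low (0), high: b.low (2)
    let mut out = [0i64; 4];
    _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
    assert_eq!(out, [0, 1, 4, 5]);
}
```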
/// Shuffles 64-bit floating-point elements in `a` across lanes using the
/// control in `imm8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute4x64_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permute4x64_pd)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpermpd, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_permute4x64_pd<const IMM8: i32>(a: __m256d) -> __m256d {
- static_assert_imm8!(IMM8);
- simd_shuffle4!(
+ static_assert_uimm_bits!(IMM8, 8);
+ simd_shuffle!(
a,
_mm256_undefined_pd(),
- <const IMM8: i32> [
+ [
IMM8 as u32 & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -2277,10 +2277,10 @@ pub unsafe fn _mm256_permute4x64_pd<const IMM8: i32>(a: __m256d) -> __m256d {
)
}
-/// Shuffles eight 32-bit foating-point elements in `a` across lanes using
+/// Shuffles eight 32-bit floating-point elements in `a` across lanes using
/// the corresponding 32-bit integer index in `idx`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutevar8x32_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutevar8x32_ps)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -2294,7 +2294,7 @@ pub unsafe fn _mm256_permutevar8x32_ps(a: __m256, idx: __m256i) -> __m256 {
/// produce four unsigned 16-bit integers, and pack these unsigned 16-bit
/// integers in the low 16 bits of the 64-bit return value
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sad_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sad_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsadbw))]
@@ -2332,7 +2332,7 @@ pub unsafe fn _mm256_sad_epu8(a: __m256i, b: __m256i) -> __m256i {
/// }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -2370,18 +2370,18 @@ pub unsafe fn _mm256_shuffle_epi8(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_shuffle_epi32<const MASK: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(MASK);
- let r: i32x8 = simd_shuffle8!(
+ static_assert_uimm_bits!(MASK, 8);
+ let r: i32x8 = simd_shuffle!(
a.as_i32x8(),
a.as_i32x8(),
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
(MASK as u32 >> 4) & 0b11,
@@ -2399,19 +2399,19 @@ pub unsafe fn _mm256_shuffle_epi32<const MASK: i32>(a: __m256i) -> __m256i {
/// the control in `imm8`. The low 64 bits of 128-bit lanes of `a` are copied
/// to the output.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shufflehi_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shufflehi_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_shufflehi_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x16();
- let r: i16x16 = simd_shuffle16!(
+ let r: i16x16 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
0,
1,
2,
@@ -2437,19 +2437,19 @@ pub unsafe fn _mm256_shufflehi_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
/// the control in `imm8`. The high 64 bits of 128-bit lanes of `a` are copied
/// to the output.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shufflelo_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shufflelo_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_shufflelo_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x16();
- let r: i16x16 = simd_shuffle16!(
+ let r: i16x16 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
0 + (IMM8 as u32 & 0b11),
0 + ((IMM8 as u32 >> 2) & 0b11),
0 + ((IMM8 as u32 >> 4) & 0b11),
@@ -2475,7 +2475,7 @@ pub unsafe fn _mm256_shufflelo_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
/// 16-bit integer in `b` is negative, and returns the results.
/// Results are zeroed out when the corresponding element in `b` is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sign_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sign_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsignw))]
@@ -2488,7 +2488,7 @@ pub unsafe fn _mm256_sign_epi16(a: __m256i, b: __m256i) -> __m256i {
/// 32-bit integer in `b` is negative, and returns the results.
/// Results are zeroed out when the corresponding element in `b` is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sign_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sign_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsignd))]
@@ -2501,7 +2501,7 @@ pub unsafe fn _mm256_sign_epi32(a: __m256i, b: __m256i) -> __m256i {
/// 8-bit integer in `b` is negative, and returns the results.
/// Results are zeroed out when the corresponding element in `b` is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sign_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sign_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsignb))]
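A sketch of the sign operation (hypothetical `sign_demo` helper): each lane of `a` is negated, zeroed, or passed through according to the sign of the matching lane in `b`.
```rust
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn sign_demo() {
    // Hypothetical helper, not part of this diff.
    let a = _mm256_set1_epi8(5);
    let mut out = [0i8; 32];
    _mm256_storeu_si256(
        out.as_mut_ptr() as *mut __m256i,
        _mm256_sign_epi8(a, _mm256_set1_epi8(-1)),
    );
    assert_eq!(out[0], -5); // negative lane in `b` negates
    _mm256_storeu_si256(
        out.as_mut_ptr() as *mut __m256i,
        _mm256_sign_epi8(a, _mm256_setzero_si256()),
    );
    assert_eq!(out[0], 0); // zero lane in `b` zeroes
}
```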
@@ -2513,7 +2513,7 @@ pub unsafe fn _mm256_sign_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Shifts packed 16-bit integers in `a` left by `count` while
/// shifting in zeros, and returns the result
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sll_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sll_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -2525,7 +2525,7 @@ pub unsafe fn _mm256_sll_epi16(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 32-bit integers in `a` left by `count` while
/// shifting in zeros, and returns the result
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sll_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sll_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -2537,7 +2537,7 @@ pub unsafe fn _mm256_sll_epi32(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 64-bit integers in `a` left by `count` while
/// shifting in zeros, and returns the result
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sll_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sll_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -2549,68 +2549,68 @@ pub unsafe fn _mm256_sll_epi64(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 16-bit integers in `a` left by `IMM8` while
/// shifting in zeros, and returns the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_slli_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pslliw(a.as_i16x16(), IMM8))
}
/// Shifts packed 32-bit integers in `a` left by `IMM8` while
/// shifting in zeros, and returns the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_slli_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psllid(a.as_i32x8(), IMM8))
}
/// Shifts packed 64-bit integers in `a` left by `IMM8` while
/// shifting in zeros, returning the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_slli_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pslliq(a.as_i64x4(), IMM8))
}
/// Shifts 128-bit lanes in `a` left by `imm8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_slli_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_slli_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_slli_si256<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
_mm256_bslli_epi128::<IMM8>(a)
}
/// Shifts 128-bit lanes in `a` left by `imm8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bslli_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_bslli_epi128)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_bslli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
const fn mask(shift: i32, i: u32) -> u32 {
let shift = shift as u32 & 0xff;
if shift > 15 || i % 16 < shift {
@@ -2621,10 +2621,10 @@ pub unsafe fn _mm256_bslli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
}
let a = a.as_i8x32();
let zero = _mm256_setzero_si256().as_i8x32();
- let r: i8x32 = simd_shuffle32!(
+ let r: i8x32 = simd_shuffle!(
zero,
a,
- <const IMM8: i32> [
+ [
mask(IMM8, 0),
mask(IMM8, 1),
mask(IMM8, 2),
@@ -2666,7 +2666,7 @@ pub unsafe fn _mm256_bslli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
/// specified by the corresponding element in `count` while
/// shifting in zeros, and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sllv_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sllv_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -2679,7 +2679,7 @@ pub unsafe fn _mm_sllv_epi32(a: __m128i, count: __m128i) -> __m128i {
/// specified by the corresponding element in `count` while
/// shifting in zeros, and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sllv_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sllv_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -2692,7 +2692,7 @@ pub unsafe fn _mm256_sllv_epi32(a: __m256i, count: __m256i) -> __m256i {
/// specified by the corresponding element in `count` while
/// shifting in zeros, and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sllv_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sllv_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -2705,7 +2705,7 @@ pub unsafe fn _mm_sllv_epi64(a: __m128i, count: __m128i) -> __m128i {
/// specified by the corresponding element in `count` while
/// shifting in zeros, and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sllv_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sllv_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsllvq))]
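Unlike `_mm256_sll_epi64` above, which applies a single count from a 128-bit operand to every lane, the `sllv`/`srlv` family shifts each lane by its own count (counts of the element width or more zero the lane). A hedged sketch of the per-lane behavior, under the same AVX2 assumptions as above:

```rust
// Sketch: per-lane variable shift (vpsllvd). Each 32-bit lane of `a` is
// shifted left by the matching lane of `count`.
#[cfg(target_arch = "x86_64")]
fn demo_sllv() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx2") {
        unsafe {
            let a = _mm256_set1_epi32(1);
            let count = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
            let r = _mm256_sllv_epi32(a, count);
            let mut out = [0i32; 8];
            _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
            assert_eq!(out, [1, 2, 4, 8, 16, 32, 64, 128]);
        }
    }
}
```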
@@ -2717,7 +2717,7 @@ pub unsafe fn _mm256_sllv_epi64(a: __m256i, count: __m256i) -> __m256i {
/// Shifts packed 16-bit integers in `a` right by `count` while
/// shifting in sign bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sra_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sra_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -2729,7 +2729,7 @@ pub unsafe fn _mm256_sra_epi16(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 32-bit integers in `a` right by `count` while
/// shifting in sign bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sra_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sra_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -2741,35 +2741,35 @@ pub unsafe fn _mm256_sra_epi32(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 16-bit integers in `a` right by `IMM8` while
/// shifting in sign bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srai_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srai_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psraiw(a.as_i16x16(), IMM8))
}
/// Shifts packed 32-bit integers in `a` right by `IMM8` while
/// shifting in sign bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srai_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srai_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psraid(a.as_i32x8(), IMM8))
}
/// Shifts packed 32-bit integers in `a` right by the amount specified by the
/// corresponding element in `count` while shifting in sign bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srav_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srav_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -2781,7 +2781,7 @@ pub unsafe fn _mm_srav_epi32(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 32-bit integers in `a` right by the amount specified by the
/// corresponding element in `count` while shifting in sign bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srav_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srav_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsravd))]
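The `sra*` family shifts in copies of the sign bit, where the `srl*` family below shifts in zeros; the two only differ on negative lanes. A small sketch of the contrast:

```rust
// Sketch: arithmetic (sign-extending) vs logical (zero-filling) right shift.
#[cfg(target_arch = "x86_64")]
fn demo_sra_vs_srl() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx2") {
        unsafe {
            let a = _mm256_set1_epi32(-8);
            let arith = _mm256_srai_epi32::<1>(a); // sign bit repeats: -4
            let logic = _mm256_srli_epi32::<1>(a); // zero fills the top bit
            let (mut x, mut y) = ([0i32; 8], [0i32; 8]);
            _mm256_storeu_si256(x.as_mut_ptr() as *mut __m256i, arith);
            _mm256_storeu_si256(y.as_mut_ptr() as *mut __m256i, logic);
            assert_eq!(x[0], -4);
            assert_eq!(y[0], 0x7FFF_FFFC); // (-8 as u32) >> 1
        }
    }
}
```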
@@ -2792,31 +2792,31 @@ pub unsafe fn _mm256_srav_epi32(a: __m256i, count: __m256i) -> __m256i {
/// Shifts 128-bit lanes in `a` right by `imm8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srli_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_si256<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
_mm256_bsrli_epi128::<IMM8>(a)
}
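Worth noting for both `slli_si256` and `srli_si256`: the byte shift happens within each 128-bit lane independently, so no byte ever crosses the middle of the 256-bit register. A sketch:

```rust
// Sketch: byte shifts are per-128-bit-lane; lane 1 is unaffected by lane 0.
#[cfg(target_arch = "x86_64")]
fn demo_byte_shift() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx2") {
        unsafe {
            let bytes: [i8; 32] = core::array::from_fn(|i| i as i8);
            let a = _mm256_loadu_si256(bytes.as_ptr() as *const __m256i);
            let r = _mm256_srli_si256::<1>(a);
            let mut out = [0i8; 32];
            _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
            assert_eq!(out[0], 1);   // lane 0 shifted down one byte
            assert_eq!(out[15], 0);  // zero shifted in at lane 0's top
            assert_eq!(out[16], 17); // lane 1 shifts independently
        }
    }
}
```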
/// Shifts 128-bit lanes in `a` right by `imm8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bsrli_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_bsrli_epi128)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i8x32();
let zero = _mm256_setzero_si256().as_i8x32();
let r: i8x32 = match IMM8 % 16 {
- 0 => simd_shuffle32!(
+ 0 => simd_shuffle!(
a,
zero,
[
@@ -2824,7 +2824,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
23, 24, 25, 26, 27, 28, 29, 30, 31,
],
),
- 1 => simd_shuffle32!(
+ 1 => simd_shuffle!(
a,
zero,
[
@@ -2832,7 +2832,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
24, 25, 26, 27, 28, 29, 30, 31, 32,
],
),
- 2 => simd_shuffle32!(
+ 2 => simd_shuffle!(
a,
zero,
[
@@ -2840,7 +2840,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
25, 26, 27, 28, 29, 30, 31, 32, 32,
],
),
- 3 => simd_shuffle32!(
+ 3 => simd_shuffle!(
a,
zero,
[
@@ -2848,7 +2848,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
25, 26, 27, 28, 29, 30, 31, 32, 32, 32,
],
),
- 4 => simd_shuffle32!(
+ 4 => simd_shuffle!(
a,
zero,
[
@@ -2856,7 +2856,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
26, 27, 28, 29, 30, 31, 32, 32, 32, 32,
],
),
- 5 => simd_shuffle32!(
+ 5 => simd_shuffle!(
a,
zero,
[
@@ -2864,7 +2864,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
27, 28, 29, 30, 31, 32, 32, 32, 32, 32,
],
),
- 6 => simd_shuffle32!(
+ 6 => simd_shuffle!(
a,
zero,
[
@@ -2872,7 +2872,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
28, 29, 30, 31, 32, 32, 32, 32, 32, 32,
],
),
- 7 => simd_shuffle32!(
+ 7 => simd_shuffle!(
a,
zero,
[
@@ -2880,7 +2880,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
28, 29, 30, 31, 32, 32, 32, 32, 32, 32, 32,
],
),
- 8 => simd_shuffle32!(
+ 8 => simd_shuffle!(
a,
zero,
[
@@ -2888,7 +2888,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
29, 30, 31, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 9 => simd_shuffle32!(
+ 9 => simd_shuffle!(
a,
zero,
[
@@ -2896,7 +2896,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
30, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 10 => simd_shuffle32!(
+ 10 => simd_shuffle!(
a,
zero,
[
@@ -2904,7 +2904,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 11 => simd_shuffle32!(
+ 11 => simd_shuffle!(
a,
zero,
[
@@ -2912,7 +2912,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 12 => simd_shuffle32!(
+ 12 => simd_shuffle!(
a,
zero,
[
@@ -2920,7 +2920,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 13 => simd_shuffle32!(
+ 13 => simd_shuffle!(
a,
zero,
[
@@ -2928,7 +2928,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 14 => simd_shuffle32!(
+ 14 => simd_shuffle!(
a,
zero,
[
@@ -2936,7 +2936,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
],
),
- 15 => simd_shuffle32!(
+ 15 => simd_shuffle!(
a,
zero,
[
@@ -2952,7 +2952,7 @@ pub unsafe fn _mm256_bsrli_epi128<const IMM8: i32>(a: __m256i) -> __m256i {
/// Shifts packed 16-bit integers in `a` right by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srl_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srl_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -2964,7 +2964,7 @@ pub unsafe fn _mm256_srl_epi16(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 32-bit integers in `a` right by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srl_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srl_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -2976,7 +2976,7 @@ pub unsafe fn _mm256_srl_epi32(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 64-bit integers in `a` right by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srl_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srl_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -2988,49 +2988,49 @@ pub unsafe fn _mm256_srl_epi64(a: __m256i, count: __m128i) -> __m256i {
/// Shifts packed 16-bit integers in `a` right by `IMM8` while shifting in
/// zeros
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srli_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_epi16<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psrliw(a.as_i16x16(), IMM8))
}
/// Shifts packed 32-bit integers in `a` right by `IMM8` while shifting in
/// zeros
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srli_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psrlid(a.as_i32x8(), IMM8))
}
/// Shifts packed 64-bit integers in `a` right by `IMM8` while shifting in
/// zeros
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srli_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srli_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_srli_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psrliq(a.as_i64x4(), IMM8))
}
/// Shifts packed 32-bit integers in `a` right by the amount specified by
/// the corresponding element in `count` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srlv_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srlv_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -3042,7 +3042,7 @@ pub unsafe fn _mm_srlv_epi32(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 32-bit integers in `a` right by the amount specified by
/// the corresponding element in `count` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srlv_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srlv_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -3054,7 +3054,7 @@ pub unsafe fn _mm256_srlv_epi32(a: __m256i, count: __m256i) -> __m256i {
/// Shifts packed 64-bit integers in `a` right by the amount specified by
/// the corresponding element in `count` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srlv_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srlv_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -3066,7 +3066,7 @@ pub unsafe fn _mm_srlv_epi64(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 64-bit integers in `a` right by the amount specified by
/// the corresponding element in `count` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srlv_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srlv_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -3079,7 +3079,7 @@ pub unsafe fn _mm256_srlv_epi64(a: __m256i, count: __m256i) -> __m256i {
/// Subtract packed 16-bit integers in `b` from packed 16-bit integers in `a`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sub_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -3090,7 +3090,7 @@ pub unsafe fn _mm256_sub_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed 32-bit integers in `b` from packed 32-bit integers in `a`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sub_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -3101,7 +3101,7 @@ pub unsafe fn _mm256_sub_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed 64-bit integers in `b` from packed 64-bit integers in `a`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sub_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -3112,7 +3112,7 @@ pub unsafe fn _mm256_sub_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed 8-bit integers in `b` from packed 8-bit integers in `a`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sub_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -3124,7 +3124,7 @@ pub unsafe fn _mm256_sub_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed 16-bit integers in `b` from packed 16-bit integers in
/// `a` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_subs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_subs_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -3136,7 +3136,7 @@ pub unsafe fn _mm256_subs_epi16(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed 8-bit integers in `b` from packed 8-bit integers in
/// `a` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_subs_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_subs_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -3148,7 +3148,7 @@ pub unsafe fn _mm256_subs_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed unsigned 16-bit integers in `b` from packed 16-bit
/// integers in `a` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_subs_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_subs_epu16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -3160,7 +3160,7 @@ pub unsafe fn _mm256_subs_epu16(a: __m256i, b: __m256i) -> __m256i {
/// Subtract packed unsigned 8-bit integers in `b` from packed 8-bit
/// integers in `a` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_subs_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_subs_epu8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpsubusb))]
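The saturating forms clamp instead of wrapping: signed results clamp to the `i8`/`i16` range, and the unsigned `subs_epu*` forms floor at zero. A sketch of the contrast with the plain `sub`:

```rust
// Sketch: wrapping vs unsigned-saturating subtraction on 8-bit lanes.
#[cfg(target_arch = "x86_64")]
fn demo_subs() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx2") {
        unsafe {
            let a = _mm256_set1_epi8(10);
            let b = _mm256_set1_epi8(20);
            let wrap = _mm256_sub_epi8(a, b); // 10 - 20 wraps to 0xF6
            let sat = _mm256_subs_epu8(a, b); // unsigned saturation: 0
            let (mut x, mut y) = ([0u8; 32], [0u8; 32]);
            _mm256_storeu_si256(x.as_mut_ptr() as *mut __m256i, wrap);
            _mm256_storeu_si256(y.as_mut_ptr() as *mut __m256i, sat);
            assert_eq!((x[0], y[0]), (0xF6, 0));
        }
    }
}
```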
@@ -3207,14 +3207,14 @@ pub unsafe fn _mm256_subs_epu8(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpackhi_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpackhi_epi8(a: __m256i, b: __m256i) -> __m256i {
#[rustfmt::skip]
- let r: i8x32 = simd_shuffle32!(a.as_i8x32(), b.as_i8x32(), [
+ let r: i8x32 = simd_shuffle!(a.as_i8x32(), b.as_i8x32(), [
8, 40, 9, 41, 10, 42, 11, 43,
12, 44, 13, 45, 14, 46, 15, 47,
24, 56, 25, 57, 26, 58, 27, 59,
@@ -3260,14 +3260,14 @@ pub unsafe fn _mm256_unpackhi_epi8(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpacklo_epi8)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpacklo_epi8(a: __m256i, b: __m256i) -> __m256i {
#[rustfmt::skip]
- let r: i8x32 = simd_shuffle32!(a.as_i8x32(), b.as_i8x32(), [
+ let r: i8x32 = simd_shuffle!(a.as_i8x32(), b.as_i8x32(), [
0, 32, 1, 33, 2, 34, 3, 35,
4, 36, 5, 37, 6, 38, 7, 39,
16, 48, 17, 49, 18, 50, 19, 51,
@@ -3309,13 +3309,13 @@ pub unsafe fn _mm256_unpacklo_epi8(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpackhi_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpackhi_epi16(a: __m256i, b: __m256i) -> __m256i {
- let r: i16x16 = simd_shuffle16!(
+ let r: i16x16 = simd_shuffle!(
a.as_i16x16(),
b.as_i16x16(),
[4, 20, 5, 21, 6, 22, 7, 23, 12, 28, 13, 29, 14, 30, 15, 31],
@@ -3357,13 +3357,13 @@ pub unsafe fn _mm256_unpackhi_epi16(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpacklo_epi16)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpacklo_epi16(a: __m256i, b: __m256i) -> __m256i {
- let r: i16x16 = simd_shuffle16!(
+ let r: i16x16 = simd_shuffle!(
a.as_i16x16(),
b.as_i16x16(),
[0, 16, 1, 17, 2, 18, 3, 19, 8, 24, 9, 25, 10, 26, 11, 27],
@@ -3398,13 +3398,13 @@ pub unsafe fn _mm256_unpacklo_epi16(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpackhi_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vunpckhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpackhi_epi32(a: __m256i, b: __m256i) -> __m256i {
- let r: i32x8 = simd_shuffle8!(a.as_i32x8(), b.as_i32x8(), [2, 10, 3, 11, 6, 14, 7, 15]);
+ let r: i32x8 = simd_shuffle!(a.as_i32x8(), b.as_i32x8(), [2, 10, 3, 11, 6, 14, 7, 15]);
transmute(r)
}
@@ -3435,13 +3435,13 @@ pub unsafe fn _mm256_unpackhi_epi32(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpacklo_epi32)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vunpcklps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpacklo_epi32(a: __m256i, b: __m256i) -> __m256i {
- let r: i32x8 = simd_shuffle8!(a.as_i32x8(), b.as_i32x8(), [0, 8, 1, 9, 4, 12, 5, 13]);
+ let r: i32x8 = simd_shuffle!(a.as_i32x8(), b.as_i32x8(), [0, 8, 1, 9, 4, 12, 5, 13]);
transmute(r)
}
@@ -3472,13 +3472,13 @@ pub unsafe fn _mm256_unpacklo_epi32(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpackhi_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpackhi_epi64(a: __m256i, b: __m256i) -> __m256i {
- let r: i64x4 = simd_shuffle4!(a.as_i64x4(), b.as_i64x4(), [1, 5, 3, 7]);
+ let r: i64x4 = simd_shuffle!(a.as_i64x4(), b.as_i64x4(), [1, 5, 3, 7]);
transmute(r)
}
@@ -3509,20 +3509,20 @@ pub unsafe fn _mm256_unpackhi_epi64(a: __m256i, b: __m256i) -> __m256i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_unpacklo_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_unpacklo_epi64(a: __m256i, b: __m256i) -> __m256i {
- let r: i64x4 = simd_shuffle4!(a.as_i64x4(), b.as_i64x4(), [0, 4, 2, 6]);
+ let r: i64x4 = simd_shuffle!(a.as_i64x4(), b.as_i64x4(), [0, 4, 2, 6]);
transmute(r)
}
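As with the byte shifts, the unpack family interleaves within each 128-bit lane rather than across the whole register, which is why the shuffle indices above come in per-lane pairs. A sketch of the resulting lane order:

```rust
// Sketch: unpacklo_epi64 interleaves the low qword of each 128-bit lane
// of `a` and `b`: a per-lane interleave, not a whole-register one.
#[cfg(target_arch = "x86_64")]
fn demo_unpack() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx2") {
        unsafe {
            let a = _mm256_setr_epi64x(0, 1, 2, 3);
            let b = _mm256_setr_epi64x(10, 11, 12, 13);
            let r = _mm256_unpacklo_epi64(a, b);
            let mut out = [0i64; 4];
            _mm256_storeu_si256(out.as_mut_ptr() as *mut __m256i, r);
            assert_eq!(out, [0, 10, 2, 12]); // [a0, b0, a2, b2]
        }
    }
}
```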
/// Computes the bitwise XOR of 256 bits (representing integer data)
/// in `a` and `b`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_si256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_si256)
#[inline]
#[target_feature(enable = "avx2")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -3536,14 +3536,14 @@ pub unsafe fn _mm256_xor_si256(a: __m256i, b: __m256i) -> __m256i {
///
/// See [LLVM commit D20468](https://reviews.llvm.org/D20468).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extract_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extract_epi8)
#[inline]
#[target_feature(enable = "avx2")]
// This intrinsic has no corresponding instruction.
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extract_epi8<const INDEX: i32>(a: __m256i) -> i32 {
- static_assert_imm5!(INDEX);
+ static_assert_uimm_bits!(INDEX, 5);
simd_extract::<_, u8>(a.as_u8x32(), INDEX as u32) as i32
}
@@ -3552,33 +3552,33 @@ pub unsafe fn _mm256_extract_epi8<const INDEX: i32>(a: __m256i) -> i32 {
///
/// See [LLVM commit D20468](https://reviews.llvm.org/D20468).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extract_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extract_epi16)
#[inline]
#[target_feature(enable = "avx2")]
// This intrinsic has no corresponding instruction.
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extract_epi16<const INDEX: i32>(a: __m256i) -> i32 {
- static_assert_imm4!(INDEX);
+ static_assert_uimm_bits!(INDEX, 4);
simd_extract::<_, u16>(a.as_u16x16(), INDEX as u32) as i32
}
/// Extracts a 32-bit integer from `a`, selected with `INDEX`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extract_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extract_epi32)
#[inline]
#[target_feature(enable = "avx2")]
// This intrinsic has no corresponding instruction.
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extract_epi32<const INDEX: i32>(a: __m256i) -> i32 {
- static_assert_imm3!(INDEX);
+ static_assert_uimm_bits!(INDEX, 3);
simd_extract(a.as_i32x8(), INDEX as u32)
}
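The extract intrinsics return a single lane widened to `i32`, and the bit width demanded of `INDEX` tracks the lane count: 5 bits for the 32 byte lanes, 4 for the 16 word lanes, 3 for the 8 dword lanes. A sketch:

```rust
// Sketch: lane extraction; an out-of-range INDEX is a compile error.
#[cfg(target_arch = "x86_64")]
fn demo_extract() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx2") {
        unsafe {
            let a = _mm256_setr_epi32(0, 10, 20, 30, 40, 50, 60, 70);
            assert_eq!(_mm256_extract_epi32::<5>(a), 50);
            // _mm256_extract_epi32::<8>(a) would not compile: 8 does not
            // fit in the 3 unsigned bits the assertion requires.
        }
    }
}
```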
/// Returns the first element of the input vector of `[4 x double]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsd_f64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsd_f64)
#[inline]
#[target_feature(enable = "avx2")]
//#[cfg_attr(test, assert_instr(movsd))] FIXME
@@ -3589,7 +3589,7 @@ pub unsafe fn _mm256_cvtsd_f64(a: __m256d) -> f64 {
/// Returns the first element of the input vector of `[8 x i32]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsi256_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsi256_si32)
#[inline]
#[target_feature(enable = "avx2")]
//#[cfg_attr(test, assert_instr(movd))] FIXME
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs
index 1099ee2cb..92e572eb1 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs
@@ -58,7 +58,7 @@ extern "C" {
/// For each packed 16-bit integer maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -71,7 +71,7 @@ pub unsafe fn _mm512_popcnt_epi16(a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -85,7 +85,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi16(k: __mmask32, a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -99,7 +99,7 @@ pub unsafe fn _mm512_mask_popcnt_epi16(src: __m512i, k: __mmask32, a: __m512i) -
/// For each packed 16-bit integer maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -112,7 +112,7 @@ pub unsafe fn _mm256_popcnt_epi16(a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -126,7 +126,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi16(k: __mmask16, a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -140,7 +140,7 @@ pub unsafe fn _mm256_mask_popcnt_epi16(src: __m256i, k: __mmask16, a: __m256i) -
/// For each packed 16-bit integer maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -153,7 +153,7 @@ pub unsafe fn _mm_popcnt_epi16(a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -167,7 +167,7 @@ pub unsafe fn _mm_maskz_popcnt_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_popcnt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi16)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntw))]
@@ -181,7 +181,7 @@ pub unsafe fn _mm_mask_popcnt_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __
/// For each packed 8-bit integer maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -194,7 +194,7 @@ pub unsafe fn _mm512_popcnt_epi8(a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -208,7 +208,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi8(k: __mmask64, a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -222,7 +222,7 @@ pub unsafe fn _mm512_mask_popcnt_epi8(src: __m512i, k: __mmask64, a: __m512i) ->
/// For each packed 8-bit integer maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -235,7 +235,7 @@ pub unsafe fn _mm256_popcnt_epi8(a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -249,7 +249,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi8(k: __mmask32, a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -263,7 +263,7 @@ pub unsafe fn _mm256_mask_popcnt_epi8(src: __m256i, k: __mmask32, a: __m256i) ->
/// For each packed 8-bit integer maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -276,7 +276,7 @@ pub unsafe fn _mm_popcnt_epi8(a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntb))]
@@ -290,7 +290,7 @@ pub unsafe fn _mm_maskz_popcnt_epi8(k: __mmask16, a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_popcnt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi8)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntb))]
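The `mask_`/`maskz_` pairs repeated throughout this file differ only in what happens to lanes whose mask bit is clear: the writemask form copies them from `src`, the zeromask form zeroes them. A plain-Rust scalar model of that convention (function names are illustrative; no target features needed):

```rust
// Scalar model of the writemask vs zeromask convention, using popcnt on
// four 16-bit lanes as the stand-in operation.
fn mask_popcnt16(src: [u16; 4], k: u8, a: [u16; 4]) -> [u16; 4] {
    core::array::from_fn(|i| {
        if k & (1 << i) != 0 { a[i].count_ones() as u16 } else { src[i] }
    })
}
fn maskz_popcnt16(k: u8, a: [u16; 4]) -> [u16; 4] {
    core::array::from_fn(|i| {
        if k & (1 << i) != 0 { a[i].count_ones() as u16 } else { 0 }
    })
}
```

So with `k = 0b0101`, `mask_popcnt16([9; 4], k, [0xF; 4])` yields `[4, 9, 4, 9]`, while the `maskz` form yields `[4, 0, 4, 0]`.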
@@ -306,7 +306,7 @@ pub unsafe fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __
/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer.
/// It then selects these bits and packs them into the output.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_bitshuffle_epi64_mask)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_bitshuffle_epi64_mask)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
@@ -321,7 +321,7 @@ pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_bitshuffle_epi64_mask)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_bitshuffle_epi64_mask)
#[inline]
#[target_feature(enable = "avx512bitalg")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
@@ -333,7 +333,7 @@ pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m
/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer.
/// It then selects these bits and packs them into the output.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bitshuffle_epi64_mask)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_bitshuffle_epi64_mask)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
@@ -348,7 +348,7 @@ pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_bitshuffle_epi64_mask)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_bitshuffle_epi64_mask)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
@@ -360,7 +360,7 @@ pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m
/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer.
/// It then selects these bits and packs them into the output.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bitshuffle_epi64_mask)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bitshuffle_epi64_mask)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
@@ -375,7 +375,7 @@ pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_bitshuffle_epi64_mask)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_bitshuffle_epi64_mask)
#[inline]
#[target_feature(enable = "avx512bitalg,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufbitqmb))]
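The bitshuffle description above is dense; in scalar terms, each 64-bit lane of `b` is a bit source and each corresponding selector byte of `c` picks one bit of it, with the eight selected bits packed into one byte of the output mask. A sketch of one lane, assuming the selector index is taken mod 64 as the instruction defines:

```rust
// Scalar model of one 64-bit lane of bitshuffle_epi64_mask.
fn bitshuffle_qword(src: u64, selectors: [u8; 8]) -> u8 {
    let mut out = 0u8;
    for (i, s) in selectors.iter().enumerate() {
        let bit = (src >> (s & 63)) & 1; // index is taken mod 64
        out |= (bit as u8) << i;
    }
    out
}
```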
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
index fbf71dfc4..0ef919617 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs
@@ -12,7 +12,7 @@ use super::avx512f::{vpl, vps};
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_epi16&expand=30)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi16&expand=30)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -27,7 +27,7 @@ pub unsafe fn _mm512_abs_epi16(a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_abs_epi16&expand=31)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi16&expand=31)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -38,7 +38,7 @@ pub unsafe fn _mm512_mask_abs_epi16(src: __m512i, k: __mmask32, a: __m512i) -> _
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_abs_epi16&expand=32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi16&expand=32)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -50,7 +50,7 @@ pub unsafe fn _mm512_maskz_abs_epi16(k: __mmask32, a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_abs_epi16&expand=28)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi16&expand=28)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -61,7 +61,7 @@ pub unsafe fn _mm256_mask_abs_epi16(src: __m256i, k: __mmask16, a: __m256i) -> _
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_abs_epi16&expand=29)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi16&expand=29)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -73,7 +73,7 @@ pub unsafe fn _mm256_maskz_abs_epi16(k: __mmask16, a: __m256i) -> __m256i {
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_abs_epi16&expand=25)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi16&expand=25)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -84,7 +84,7 @@ pub unsafe fn _mm_mask_abs_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m12
/// Compute the absolute value of packed signed 16-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_abs_epi16&expand=26)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi16&expand=26)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsw))]
@@ -96,7 +96,7 @@ pub unsafe fn _mm_maskz_abs_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_epi8&expand=57)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi8&expand=57)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -111,7 +111,7 @@ pub unsafe fn _mm512_abs_epi8(a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_abs_epi8&expand=58)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi8&expand=58)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -122,7 +122,7 @@ pub unsafe fn _mm512_mask_abs_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_abs_epi8&expand=59)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi8&expand=59)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -134,7 +134,7 @@ pub unsafe fn _mm512_maskz_abs_epi8(k: __mmask64, a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_abs_epi8&expand=55)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi8&expand=55)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -145,7 +145,7 @@ pub unsafe fn _mm256_mask_abs_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_abs_epi8&expand=56)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi8&expand=56)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -157,7 +157,7 @@ pub unsafe fn _mm256_maskz_abs_epi8(k: __mmask32, a: __m256i) -> __m256i {
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_abs_epi8&expand=52)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi8&expand=52)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsb))]
@@ -168,7 +168,7 @@ pub unsafe fn _mm_mask_abs_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m12
/// Compute the absolute value of packed signed 8-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_abs_epi8&expand=53)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi8&expand=53)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsb))]
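One subtlety the "unsigned results" wording covers: the absolute value of `i8::MIN` is 128, which is only representable in the unsigned view of the lane. A scalar model of the masked form (illustrative names, plain Rust):

```rust
// Scalar model of a masked absolute value over four 8-bit lanes.
fn mask_abs_epi8(src: [u8; 4], k: u8, a: [i8; 4]) -> [u8; 4] {
    core::array::from_fn(|i| {
        // unsigned_abs handles i8::MIN (-128 -> 128) without overflow.
        if k & (1 << i) != 0 { a[i].unsigned_abs() } else { src[i] }
    })
}
```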
@@ -180,7 +180,7 @@ pub unsafe fn _mm_maskz_abs_epi8(k: __mmask16, a: __m128i) -> __m128i {
/// Add packed 16-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_epi16&expand=91)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi16&expand=91)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -190,7 +190,7 @@ pub unsafe fn _mm512_add_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Add packed 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_epi16&expand=92)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi16&expand=92)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -201,7 +201,7 @@ pub unsafe fn _mm512_mask_add_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Add packed 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_epi16&expand=93)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi16&expand=93)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -213,7 +213,7 @@ pub unsafe fn _mm512_maskz_add_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Add packed 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_epi&expand=89)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi16&expand=89)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -224,7 +224,7 @@ pub unsafe fn _mm256_mask_add_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Add packed 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_epi16&expand=90)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi16&expand=90)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -236,7 +236,7 @@ pub unsafe fn _mm256_maskz_add_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Add packed 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_epi16&expand=86)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi16&expand=86)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddw))]
@@ -247,7 +247,7 @@ pub unsafe fn _mm_mask_add_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Add packed 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_epi16&expand=87)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi16&expand=87)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddw))]
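// With 16-bit lanes a 512-bit vector holds 32 of them, which is why the
// masks above are __mmask32 (__mmask16 and __mmask8 for the 256- and
// 128-bit forms). The unmasked add wraps on overflow; a one-lane scalar
// model of vpaddw (illustrative only):
fn main() {
    let (a, b): (i16, i16) = (i16::MAX, 1);
    assert_eq!(a.wrapping_add(b), i16::MIN); // wraps, does not saturate
}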
@@ -259,7 +259,7 @@ pub unsafe fn _mm_maskz_add_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Add packed 8-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_epi8&expand=118)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi8&expand=118)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -269,7 +269,7 @@ pub unsafe fn _mm512_add_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Add packed 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_epi8&expand=119)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi8&expand=119)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -280,7 +280,7 @@ pub unsafe fn _mm512_mask_add_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Add packed 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_epi8&expand=120)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi8&expand=120)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -292,7 +292,7 @@ pub unsafe fn _mm512_maskz_add_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Add packed 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_epi8&expand=116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi8&expand=116)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -303,7 +303,7 @@ pub unsafe fn _mm256_mask_add_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Add packed 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_epi8&expand=117)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi8&expand=117)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -315,7 +315,7 @@ pub unsafe fn _mm256_maskz_add_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Add packed 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_epi8&expand=113)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi8&expand=113)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddb))]
@@ -326,7 +326,7 @@ pub unsafe fn _mm_mask_add_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Add packed 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_epi8&expand=114)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi8&expand=114)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddb))]
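// Same shape for 8-bit lanes: 64 per 512-bit vector, hence __mmask64.
// A one-lane scalar model of the wrapping behavior of vpaddb:
fn main() {
    let (a, b): (i8, i8) = (127, 1);
    assert_eq!(a.wrapping_add(b), -128); // wraps around i8::MAX
}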
@@ -338,7 +338,7 @@ pub unsafe fn _mm_maskz_add_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_adds_epu16&expand=197)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epu16&expand=197)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -353,7 +353,7 @@ pub unsafe fn _mm512_adds_epu16(a: __m512i, b: __m512i) -> __m512i {
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_adds_epu16&expand=198)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epu16&expand=198)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -368,7 +368,7 @@ pub unsafe fn _mm512_mask_adds_epu16(
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_adds_epu16&expand=199)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epu16&expand=199)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -383,7 +383,7 @@ pub unsafe fn _mm512_maskz_adds_epu16(k: __mmask32, a: __m512i, b: __m512i) -> _
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_adds_epu16&expand=195)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epu16&expand=195)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -403,7 +403,7 @@ pub unsafe fn _mm256_mask_adds_epu16(
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_adds_epu16&expand=196)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epu16&expand=196)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -418,7 +418,7 @@ pub unsafe fn _mm256_maskz_adds_epu16(k: __mmask16, a: __m256i, b: __m256i) -> _
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_adds_epu16&expand=192)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epu16&expand=192)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusw))]
@@ -428,7 +428,7 @@ pub unsafe fn _mm_mask_adds_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Add packed unsigned 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_adds_epu16&expand=193)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epu16&expand=193)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusw))]
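// The adds_epu16 family saturates instead of wrapping: sums above
// u16::MAX clamp to u16::MAX. A one-lane scalar model of vpaddusw:
fn main() {
    let (a, b): (u16, u16) = (0xFFF0, 0x0100);
    assert_eq!(a.saturating_add(b), u16::MAX); // clamps at 65535
    assert_eq!(1u16.saturating_add(2), 3); // in-range lanes are unaffected
}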
@@ -443,7 +443,7 @@ pub unsafe fn _mm_maskz_adds_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_adds_epu8&expand=206)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epu8&expand=206)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -458,7 +458,7 @@ pub unsafe fn _mm512_adds_epu8(a: __m512i, b: __m512i) -> __m512i {
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_adds_epu8&expand=207)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epu8&expand=207)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -468,7 +468,7 @@ pub unsafe fn _mm512_mask_adds_epu8(src: __m512i, k: __mmask64, a: __m512i, b: _
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_adds_epu8&expand=208)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epu8&expand=208)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -483,7 +483,7 @@ pub unsafe fn _mm512_maskz_adds_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_adds_epu8&expand=204)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epu8&expand=204)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -493,7 +493,7 @@ pub unsafe fn _mm256_mask_adds_epu8(src: __m256i, k: __mmask32, a: __m256i, b: _
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_adds_epu8&expand=205)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epu8&expand=205)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -508,7 +508,7 @@ pub unsafe fn _mm256_maskz_adds_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_adds_epu8&expand=201)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epu8&expand=201)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusb))]
@@ -518,7 +518,7 @@ pub unsafe fn _mm_mask_adds_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m1
/// Add packed unsigned 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_adds_epu8&expand=202)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epu8&expand=202)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddusb))]
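// The 256- and 128-bit forms above also require AVX512VL, so runtime
// dispatch has to check both features. An illustrative sketch, not
// part of this patch (`demo_adds_epu8` is a hypothetical name):
#[cfg(target_arch = "x86_64")]
fn demo_adds_epu8() {
    if !(is_x86_feature_detected!("avx512bw") && is_x86_feature_detected!("avx512vl")) {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm_set1_epi8(200u8 as i8);
        let b = _mm_set1_epi8(100u8 as i8);
        // All-ones mask: every lane computes the saturating sum.
        let r: [u8; 16] = std::mem::transmute(_mm_maskz_adds_epu8(!0, a, b));
        assert_eq!(r[0], 255); // 200 + 100 saturates to u8::MAX
    }
}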
@@ -533,7 +533,7 @@ pub unsafe fn _mm_maskz_adds_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m12
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_adds_epi16&expand=179)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epi16&expand=179)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -548,7 +548,7 @@ pub unsafe fn _mm512_adds_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_adds_epi16&expand=180)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epi16&expand=180)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -563,7 +563,7 @@ pub unsafe fn _mm512_mask_adds_epi16(
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_adds_epi16&expand=181)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epi16&expand=181)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -578,7 +578,7 @@ pub unsafe fn _mm512_maskz_adds_epi16(k: __mmask32, a: __m512i, b: __m512i) -> _
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_adds_epi16&expand=177)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epi16&expand=177)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -593,7 +593,7 @@ pub unsafe fn _mm256_mask_adds_epi16(
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_adds_epi16&expand=178)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epi16&expand=178)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -608,7 +608,7 @@ pub unsafe fn _mm256_maskz_adds_epi16(k: __mmask16, a: __m256i, b: __m256i) -> _
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_adds_epi16&expand=174)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epi16&expand=174)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsw))]
@@ -618,7 +618,7 @@ pub unsafe fn _mm_mask_adds_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Add packed signed 16-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_adds_epi16&expand=175)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epi16&expand=175)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsw))]
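// Signed saturation clamps to the i16 range at both ends. A one-lane
// scalar model of vpaddsw:
fn main() {
    assert_eq!(i16::MAX.saturating_add(10), i16::MAX); // clamps high
    assert_eq!((-30000i16).saturating_add(-10000), i16::MIN); // clamps low
}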
@@ -633,7 +633,7 @@ pub unsafe fn _mm_maskz_adds_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_adds_epi8&expand=188)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_adds_epi8&expand=188)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -648,7 +648,7 @@ pub unsafe fn _mm512_adds_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_adds_epi8&expand=189)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_adds_epi8&expand=189)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -658,7 +658,7 @@ pub unsafe fn _mm512_mask_adds_epi8(src: __m512i, k: __mmask64, a: __m512i, b: _
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_adds_epi8&expand=190)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_adds_epi8&expand=190)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -673,7 +673,7 @@ pub unsafe fn _mm512_maskz_adds_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_adds_epi8&expand=186)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_adds_epi8&expand=186)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -683,7 +683,7 @@ pub unsafe fn _mm256_mask_adds_epi8(src: __m256i, k: __mmask32, a: __m256i, b: _
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_adds_epi8&expand=187)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_adds_epi8&expand=187)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -698,7 +698,7 @@ pub unsafe fn _mm256_maskz_adds_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_adds_epi8&expand=183)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_adds_epi8&expand=183)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsb))]
@@ -708,7 +708,7 @@ pub unsafe fn _mm_mask_adds_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m1
/// Add packed signed 8-bit integers in a and b using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_adds_epi8&expand=184)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_adds_epi8&expand=184)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddsb))]
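// One lane of a masked form such as _mm_mask_adds_epi8: the saturating
// result is taken only where the mask bit is set, otherwise src is
// kept. A scalar model (illustrative only):
fn main() {
    fn lane(src: i8, bit: bool, a: i8, b: i8) -> i8 {
        if bit { a.saturating_add(b) } else { src }
    }
    assert_eq!(lane(9, true, 120, 30), i8::MAX); // selected: saturates to 127
    assert_eq!(lane(9, false, 120, 30), 9); // unselected: src copied through
}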
@@ -723,7 +723,7 @@ pub unsafe fn _mm_maskz_adds_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m12
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_epi16&expand=5685)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi16&expand=5685)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -733,7 +733,7 @@ pub unsafe fn _mm512_sub_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_epi16&expand=5683)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi16&expand=5683)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -744,7 +744,7 @@ pub unsafe fn _mm512_mask_sub_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_epi16&expand=5684)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi16&expand=5684)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -756,7 +756,7 @@ pub unsafe fn _mm512_maskz_sub_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_epi16&expand=5680)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi16&expand=5680)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -767,7 +767,7 @@ pub unsafe fn _mm256_mask_sub_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_epi16&expand=5681)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi16&expand=5681)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -779,7 +779,7 @@ pub unsafe fn _mm256_maskz_sub_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_epi16&expand=5677)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi16&expand=5677)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubw))]
@@ -790,7 +790,7 @@ pub unsafe fn _mm_mask_sub_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_epi16&expand=5678)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi16&expand=5678)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubw))]
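// Plain subtraction wraps just like the plain adds above. A one-lane
// scalar model of vpsubw:
fn main() {
    let (a, b): (i16, i16) = (i16::MIN, 1);
    assert_eq!(a.wrapping_sub(b), i16::MAX); // wraps around i16::MIN
}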
@@ -802,7 +802,7 @@ pub unsafe fn _mm_maskz_sub_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_epi8&expand=5712)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi8&expand=5712)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -812,7 +812,7 @@ pub unsafe fn _mm512_sub_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_epi8&expand=5710)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi8&expand=5710)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -823,7 +823,7 @@ pub unsafe fn _mm512_mask_sub_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_epi8&expand=5711)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi8&expand=5711)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -835,7 +835,7 @@ pub unsafe fn _mm512_maskz_sub_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_epi8&expand=5707)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi8&expand=5707)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -846,7 +846,7 @@ pub unsafe fn _mm256_mask_sub_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_epi8&expand=5708)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi8&expand=5708)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -858,7 +858,7 @@ pub unsafe fn _mm256_maskz_sub_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_epi8&expand=5704)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi8&expand=5704)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubb))]
@@ -869,7 +869,7 @@ pub unsafe fn _mm_mask_sub_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_epi8&expand=5705)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi8&expand=5705)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubb))]
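// One lane of the zeromask form, e.g. _mm512_maskz_sub_epi8, as a
// scalar model (illustrative only):
fn main() {
    fn lane(bit: bool, a: i8, b: i8) -> i8 {
        if bit { a.wrapping_sub(b) } else { 0 }
    }
    assert_eq!(lane(true, 5, 9), -4);
    assert_eq!(lane(false, 5, 9), 0); // unselected lanes are zeroed
}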
@@ -881,7 +881,7 @@ pub unsafe fn _mm_maskz_sub_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_subs_epu16&expand=5793)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epu16&expand=5793)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -896,7 +896,7 @@ pub unsafe fn _mm512_subs_epu16(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_subs_epu16&expand=5791)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epu16&expand=5791)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -911,7 +911,7 @@ pub unsafe fn _mm512_mask_subs_epu16(
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_subs_epu16&expand=5792)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epu16&expand=5792)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -926,7 +926,7 @@ pub unsafe fn _mm512_maskz_subs_epu16(k: __mmask32, a: __m512i, b: __m512i) -> _
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_subs_epu16&expand=5788)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epu16&expand=5788)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -946,7 +946,7 @@ pub unsafe fn _mm256_mask_subs_epu16(
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_subs_epu16&expand=5789)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epu16&expand=5789)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -961,7 +961,7 @@ pub unsafe fn _mm256_maskz_subs_epu16(k: __mmask16, a: __m256i, b: __m256i) -> _
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_subs_epu16&expand=5785)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epu16&expand=5785)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusw))]
@@ -971,7 +971,7 @@ pub unsafe fn _mm_mask_subs_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_subs_epu16&expand=5786)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epu16&expand=5786)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusw))]
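// Unsigned saturating subtraction floors at 0, which is what makes the
// classic subs(a,b) | subs(b,a) absolute-difference idiom work: one of
// the two operands is always 0. A one-lane scalar model of vpsubusw:
fn main() {
    let (a, b): (u16, u16) = (3, 10);
    assert_eq!(a.saturating_sub(b), 0); // floors at zero
    assert_eq!(b.saturating_sub(a), 7);
}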
@@ -986,7 +986,7 @@ pub unsafe fn _mm_maskz_subs_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_subs_epu8&expand=5802)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epu8&expand=5802)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubusb))]
@@ -1001,7 +1001,7 @@ pub unsafe fn _mm512_subs_epu8(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_subs_epu8&expand=5800)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epu8&expand=5800)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubusb))]
@@ -1011,7 +1011,7 @@ pub unsafe fn _mm512_mask_subs_epu8(src: __m512i, k: __mmask64, a: __m512i, b: _
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_subs_epu8&expand=5801)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epu8&expand=5801)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubusb))]
@@ -1026,7 +1026,7 @@ pub unsafe fn _mm512_maskz_subs_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_subs_epu8&expand=5797)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epu8&expand=5797)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusb))]
@@ -1036,7 +1036,7 @@ pub unsafe fn _mm256_mask_subs_epu8(src: __m256i, k: __mmask32, a: __m256i, b: _
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_subs_epu8&expand=5798)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epu8&expand=5798)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusb))]
@@ -1051,7 +1051,7 @@ pub unsafe fn _mm256_maskz_subs_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_subs_epu8&expand=5794)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epu8&expand=5794)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusb))]
@@ -1061,7 +1061,7 @@ pub unsafe fn _mm_mask_subs_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m1
/// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_subs_epu8&expand=5795)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epu8&expand=5795)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubusb))]
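// The same floor-at-zero behavior for u8 lanes (vpsubusb):
fn main() {
    assert_eq!(10u8.saturating_sub(200), 0);
    assert_eq!(200u8.saturating_sub(10), 190);
}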
@@ -1076,7 +1076,7 @@ pub unsafe fn _mm_maskz_subs_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m12
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_subs_epi16&expand=5775)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epi16&expand=5775)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -1091,7 +1091,7 @@ pub unsafe fn _mm512_subs_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_subs_epi16&expand=5773)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epi16&expand=5773)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -1106,7 +1106,7 @@ pub unsafe fn _mm512_mask_subs_epi16(
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_subs_epi16&expand=5774)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epi16&expand=5774)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -1121,7 +1121,7 @@ pub unsafe fn _mm512_maskz_subs_epi16(k: __mmask32, a: __m512i, b: __m512i) -> _
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_subs_epi16&expand=5770)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epi16&expand=5770)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -1136,7 +1136,7 @@ pub unsafe fn _mm256_mask_subs_epi16(
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_subs_epi16&expand=5771)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epi16&expand=5771)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -1151,7 +1151,7 @@ pub unsafe fn _mm256_maskz_subs_epi16(k: __mmask16, a: __m256i, b: __m256i) -> _
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_subs_epi16&expand=5767)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epi16&expand=5767)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsw))]
@@ -1161,7 +1161,7 @@ pub unsafe fn _mm_mask_subs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_subs_epi16&expand=5768)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epi16&expand=5768)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsw))]
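// Signed saturating subtraction clamps to the i16 range instead of
// flooring at zero. A one-lane scalar model of vpsubsw:
fn main() {
    assert_eq!(i16::MIN.saturating_sub(1), i16::MIN); // clamps low
    assert_eq!(i16::MAX.saturating_sub(-1), i16::MAX); // clamps high
}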
@@ -1176,7 +1176,7 @@ pub unsafe fn _mm_maskz_subs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_subs_epi8&expand=5784)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_subs_epi8&expand=5784)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -1191,7 +1191,7 @@ pub unsafe fn _mm512_subs_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_subs_epi8&expand=5782)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_subs_epi8&expand=5782)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -1201,7 +1201,7 @@ pub unsafe fn _mm512_mask_subs_epi8(src: __m512i, k: __mmask64, a: __m512i, b: _
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_subs_epi8&expand=5783)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_subs_epi8&expand=5783)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -1216,7 +1216,7 @@ pub unsafe fn _mm512_maskz_subs_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_subs_epi8&expand=5779)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_subs_epi8&expand=5779)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -1226,7 +1226,7 @@ pub unsafe fn _mm256_mask_subs_epi8(src: __m256i, k: __mmask32, a: __m256i, b: _
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_subs_epi8&expand=5780)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_subs_epi8&expand=5780)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -1241,7 +1241,7 @@ pub unsafe fn _mm256_maskz_subs_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_subs_epi8&expand=5776)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_subs_epi8&expand=5776)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsb))]
@@ -1251,7 +1251,7 @@ pub unsafe fn _mm_mask_subs_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m1
/// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a using saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_subs_epi8&expand=5777)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_subs_epi8&expand=5777)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubsb))]
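// And the i8 form (vpsubsb), clamping to the i8 range:
fn main() {
    assert_eq!((-128i8).saturating_sub(1), -128);
    assert_eq!(127i8.saturating_sub(-1), 127);
}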
@@ -1266,7 +1266,7 @@ pub unsafe fn _mm_maskz_subs_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m12
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mulhi_epu16&expand=3973)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhi_epu16&expand=3973)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -1276,7 +1276,7 @@ pub unsafe fn _mm512_mulhi_epu16(a: __m512i, b: __m512i) -> __m512i {
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mulhi_epu16&expand=3971)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhi_epu16&expand=3971)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -1292,7 +1292,7 @@ pub unsafe fn _mm512_mask_mulhi_epu16(
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mulhi_epu16&expand=3972)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhi_epu16&expand=3972)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -1304,7 +1304,7 @@ pub unsafe fn _mm512_maskz_mulhi_epu16(k: __mmask32, a: __m512i, b: __m512i) ->
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mulhi_epu16&expand=3968)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhi_epu16&expand=3968)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -1320,7 +1320,7 @@ pub unsafe fn _mm256_mask_mulhi_epu16(
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mulhi_epu16&expand=3969)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhi_epu16&expand=3969)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -1332,7 +1332,7 @@ pub unsafe fn _mm256_maskz_mulhi_epu16(k: __mmask16, a: __m256i, b: __m256i) ->
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mulhi_epu16&expand=3965)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhi_epu16&expand=3965)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
@@ -1343,7 +1343,7 @@ pub unsafe fn _mm_mask_mulhi_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m
/// Multiply the packed unsigned 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mulhi_epu16&expand=3966)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhi_epu16&expand=3966)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhuw))]
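
The `mulhi_epu16` family widens each pair of 16-bit lanes to a 32-bit product and keeps only the high half. A minimal scalar sketch of the per-lane behavior (a model for illustration, not the intrinsic itself):

// Per-lane model of _mm512_mulhi_epu16 and its masked variants:
// zero-extend, multiply, keep the high 16 bits of the 32-bit product.
fn mulhi_epu16_lane(a: u16, b: u16) -> u16 {
    (((a as u32) * (b as u32)) >> 16) as u16
}
// mulhi_epu16_lane(0xFFFF, 0xFFFF) == 0xFFFE, since
// 0xFFFF * 0xFFFF = 0xFFFE_0001.
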
@@ -1355,7 +1355,7 @@ pub unsafe fn _mm_maskz_mulhi_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m1
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mulhi_epi16&expand=3962)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhi_epi16&expand=3962)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -1365,7 +1365,7 @@ pub unsafe fn _mm512_mulhi_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mulhi_epi16&expand=3960)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhi_epi16&expand=3960)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -1381,7 +1381,7 @@ pub unsafe fn _mm512_mask_mulhi_epi16(
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mulhi_epi16&expand=3961)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhi_epi16&expand=3961)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -1393,7 +1393,7 @@ pub unsafe fn _mm512_maskz_mulhi_epi16(k: __mmask32, a: __m512i, b: __m512i) ->
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mulhi_epi16&expand=3957)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhi_epi16&expand=3957)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -1409,7 +1409,7 @@ pub unsafe fn _mm256_mask_mulhi_epi16(
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mulhi_epi16&expand=3958)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhi_epi16&expand=3958)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -1421,7 +1421,7 @@ pub unsafe fn _mm256_maskz_mulhi_epi16(k: __mmask16, a: __m256i, b: __m256i) ->
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mulhi_epi16&expand=3954)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhi_epi16&expand=3954)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhw))]
@@ -1432,7 +1432,7 @@ pub unsafe fn _mm_mask_mulhi_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m
/// Multiply the packed signed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mulhi_epi16&expand=3955)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhi_epi16&expand=3955)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhw))]
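
The signed `mulhi_epi16` family differs only in sign-extending before the widening multiply, which changes the high half for negative inputs. A scalar sketch (illustrative, not the intrinsic):

// Per-lane model of _mm512_mulhi_epi16: sign-extend before multiplying.
fn mulhi_epi16_lane(a: i16, b: i16) -> i16 {
    (((a as i32) * (b as i32)) >> 16) as i16
}
// mulhi_epi16_lane(-1, 1) == -1: the product is 0xFFFF_FFFF as an i32,
// so the signed high half is 0xFFFF, whereas the unsigned variant
// returns 0 for the same bit patterns.
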
@@ -1444,7 +1444,7 @@ pub unsafe fn _mm_maskz_mulhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m1
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mulhrs_epi16&expand=3986)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mulhrs_epi16&expand=3986)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
@@ -1454,7 +1454,7 @@ pub unsafe fn _mm512_mulhrs_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mulhrs_epi16&expand=3984)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mulhrs_epi16&expand=3984)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
@@ -1470,7 +1470,7 @@ pub unsafe fn _mm512_mask_mulhrs_epi16(
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mulhrs_epi16&expand=3985)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mulhrs_epi16&expand=3985)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
@@ -1482,7 +1482,7 @@ pub unsafe fn _mm512_maskz_mulhrs_epi16(k: __mmask32, a: __m512i, b: __m512i) ->
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mulhrs_epi16&expand=3981)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mulhrs_epi16&expand=3981)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
@@ -1498,7 +1498,7 @@ pub unsafe fn _mm256_mask_mulhrs_epi16(
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mulhrs_epi16&expand=3982)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mulhrs_epi16&expand=3982)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
@@ -1510,7 +1510,7 @@ pub unsafe fn _mm256_maskz_mulhrs_epi16(k: __mmask16, a: __m256i, b: __m256i) ->
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mulhrs_epi16&expand=3978)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mulhrs_epi16&expand=3978)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
@@ -1521,7 +1521,7 @@ pub unsafe fn _mm_mask_mulhrs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits \[16:1\] to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mulhrs_epi16&expand=3979)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mulhrs_epi16&expand=3979)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulhrsw))]
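
Intel's "truncate to the 18 most significant bits, round by adding 1, and store bits [16:1]" wording for `mulhrs` is easier to follow as arithmetic: shift the 32-bit product right by 14 (keeping its 18 high bits), add 1 to round, then shift right by 1. A scalar sketch under that reading (illustrative, not the intrinsic):

// Per-lane model of _mm512_mulhrs_epi16: Q15 fixed-point multiply
// with round-to-nearest.
fn mulhrs_epi16_lane(a: i16, b: i16) -> i16 {
    let product = (a as i32) * (b as i32); // 32-bit intermediate
    (((product >> 14) + 1) >> 1) as i16    // round, then take bits [16:1]
}
// Interpreting lanes as Q15 fractions: 0x4000 is 0.5, and
// mulhrs_epi16_lane(0x4000, 0x4000) == 0x2000, i.e. 0.25.
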
@@ -1533,7 +1533,7 @@ pub unsafe fn _mm_maskz_mulhrs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mullo_epi16&expand=3996)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mullo_epi16&expand=3996)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -1543,7 +1543,7 @@ pub unsafe fn _mm512_mullo_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mullo_epi16&expand=3994)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mullo_epi16&expand=3994)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -1559,7 +1559,7 @@ pub unsafe fn _mm512_mask_mullo_epi16(
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mullo_epi16&expand=3995)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mullo_epi16&expand=3995)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -1571,7 +1571,7 @@ pub unsafe fn _mm512_maskz_mullo_epi16(k: __mmask32, a: __m512i, b: __m512i) ->
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mullo_epi16&expand=3991)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mullo_epi16&expand=3991)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -1587,7 +1587,7 @@ pub unsafe fn _mm256_mask_mullo_epi16(
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mullo_epi16&expand=3992)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mullo_epi16&expand=3992)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -1599,7 +1599,7 @@ pub unsafe fn _mm256_maskz_mullo_epi16(k: __mmask16, a: __m256i, b: __m256i) ->
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mullo_epi16&expand=3988)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mullo_epi16&expand=3988)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmullw))]
@@ -1610,7 +1610,7 @@ pub unsafe fn _mm_mask_mullo_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m
/// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mullo_epi16&expand=3989)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mullo_epi16&expand=3989)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmullw))]
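
The `mullo` family keeps the low half instead, which for 16-bit lanes is exactly a wrapping multiply, so signedness does not affect the result bits. A scalar sketch (illustrative):

// Per-lane model of _mm512_mullo_epi16 and its masked variants.
fn mullo_epi16_lane(a: i16, b: i16) -> i16 {
    a.wrapping_mul(b) // low 16 bits of the 32-bit product
}
// mullo_epi16_lane(300, 300) == 24464, i.e. 90000 mod 2^16.
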
@@ -1622,7 +1622,7 @@ pub unsafe fn _mm_maskz_mullo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m1
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epu16&expand=3609)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu16&expand=3609)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1632,7 +1632,7 @@ pub unsafe fn _mm512_max_epu16(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epu16&expand=3607)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu16&expand=3607)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1643,7 +1643,7 @@ pub unsafe fn _mm512_mask_max_epu16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epu16&expand=3608)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu16&expand=3608)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1655,7 +1655,7 @@ pub unsafe fn _mm512_maskz_max_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epu16&expand=3604)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu16&expand=3604)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1666,7 +1666,7 @@ pub unsafe fn _mm256_mask_max_epu16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epu16&expand=3605)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu16&expand=3605)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1678,7 +1678,7 @@ pub unsafe fn _mm256_maskz_max_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epu16&expand=3601)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu16&expand=3601)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
@@ -1689,7 +1689,7 @@ pub unsafe fn _mm_mask_max_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed unsigned 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epu16&expand=3602)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu16&expand=3602)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuw))]
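
Every intrinsic in these families comes in a plain, `mask` (writemask), and `maskz` (zeromask) form; the two masked forms differ only in what a lane receives when its mask bit is clear. A per-lane sketch of the distinction (function names are illustrative):

// Per-lane models of the three _mm512_*max_epu16 forms; `k_bit` is
// the lane's bit from the __mmask32 operand.
fn max_epu16_lane(a: u16, b: u16) -> u16 {
    a.max(b)
}
fn mask_max_epu16_lane(src: u16, k_bit: bool, a: u16, b: u16) -> u16 {
    if k_bit { a.max(b) } else { src } // writemask: copy from src
}
fn maskz_max_epu16_lane(k_bit: bool, a: u16, b: u16) -> u16 {
    if k_bit { a.max(b) } else { 0 }   // zeromask: zero the lane
}
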
@@ -1701,7 +1701,7 @@ pub unsafe fn _mm_maskz_max_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epu8&expand=3636)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu8&expand=3636)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1711,7 +1711,7 @@ pub unsafe fn _mm512_max_epu8(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epu8&expand=3634)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu8&expand=3634)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1722,7 +1722,7 @@ pub unsafe fn _mm512_mask_max_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epu8&expand=3635)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu8&expand=3635)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1734,7 +1734,7 @@ pub unsafe fn _mm512_maskz_max_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epu8&expand=3631)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu8&expand=3631)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1745,7 +1745,7 @@ pub unsafe fn _mm256_mask_max_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epu8&expand=3632)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu8&expand=3632)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1757,7 +1757,7 @@ pub unsafe fn _mm256_maskz_max_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epu8&expand=3628)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu8&expand=3628)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxub))]
@@ -1768,7 +1768,7 @@ pub unsafe fn _mm_mask_max_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Compare packed unsigned 8-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epu8&expand=3629)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu8&expand=3629)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxub))]
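
At full width, the 8-bit variant is the same per-lane rule applied across 64 lanes, with one bit of the `__mmask64` per lane. A whole-vector model using plain arrays (the array representation is an assumption for illustration):

// Model of _mm512_mask_max_epu8: bit i of k selects lane i.
fn mask_max_epu8_model(src: [u8; 64], k: u64, a: [u8; 64], b: [u8; 64]) -> [u8; 64] {
    core::array::from_fn(|i| if (k >> i) & 1 == 1 { a[i].max(b[i]) } else { src[i] })
}
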
@@ -1780,7 +1780,7 @@ pub unsafe fn _mm_maskz_max_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epi16&expand=3573)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi16&expand=3573)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1790,7 +1790,7 @@ pub unsafe fn _mm512_max_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epi16&expand=3571)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi16&expand=3571)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1801,7 +1801,7 @@ pub unsafe fn _mm512_mask_max_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epi16&expand=3572)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi16&expand=3572)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1813,7 +1813,7 @@ pub unsafe fn _mm512_maskz_max_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epi16&expand=3568)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi16&expand=3568)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1824,7 +1824,7 @@ pub unsafe fn _mm256_mask_max_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epi16&expand=3569)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi16&expand=3569)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1836,7 +1836,7 @@ pub unsafe fn _mm256_maskz_max_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epi16&expand=3565)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi16&expand=3565)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
@@ -1847,7 +1847,7 @@ pub unsafe fn _mm_mask_max_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed signed 16-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epi16&expand=3566)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi16&expand=3566)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsw))]
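
The `epi` and `epu` families act on the same bit patterns but order them differently: 0xFFFF is u16::MAX yet -1 as an i16, so the signed max can pick the other operand. A scalar sketch of the divergence:

// Same bits, different winner under signed vs. unsigned max.
fn demo_epi16_vs_epu16_max() {
    let (x, y) = (0xFFFF_u16, 0x0001_u16);
    assert_eq!(x.max(y), 0xFFFF);            // epu16-style: 65535 > 1
    assert_eq!((x as i16).max(y as i16), 1); // epi16-style: -1 < 1
}
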
@@ -1859,7 +1859,7 @@ pub unsafe fn _mm_maskz_max_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epi8&expand=3600)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi8&expand=3600)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1869,7 +1869,7 @@ pub unsafe fn _mm512_max_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epi8&expand=3598)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi8&expand=3598)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1880,7 +1880,7 @@ pub unsafe fn _mm512_mask_max_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epi8&expand=3599)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi8&expand=3599)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1892,7 +1892,7 @@ pub unsafe fn _mm512_maskz_max_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epi8&expand=3595)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi8&expand=3595)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1903,7 +1903,7 @@ pub unsafe fn _mm256_mask_max_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epi8&expand=3596)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi8&expand=3596)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1915,7 +1915,7 @@ pub unsafe fn _mm256_maskz_max_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epi8&expand=3592)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi8&expand=3592)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
@@ -1926,7 +1926,7 @@ pub unsafe fn _mm_mask_max_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Compare packed signed 8-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epi8&expand=3593)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi8&expand=3593)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsb))]
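
The narrower vector widths only shrink the lane count and mask type; the 128-bit signed-byte zeromask variant has 16 lanes selected by a `__mmask16`. A whole-vector model in array form (for illustration):

// Model of _mm_maskz_max_epi8: 16 i8 lanes, one __mmask16 bit each.
fn maskz_max_epi8_model(k: u16, a: [i8; 16], b: [i8; 16]) -> [i8; 16] {
    core::array::from_fn(|i| if (k >> i) & 1 == 1 { a[i].max(b[i]) } else { 0 })
}
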
@@ -1938,7 +1938,7 @@ pub unsafe fn _mm_maskz_max_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epu16&expand=3723)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu16&expand=3723)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -1948,7 +1948,7 @@ pub unsafe fn _mm512_min_epu16(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epu16&expand=3721)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu16&expand=3721)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -1959,7 +1959,7 @@ pub unsafe fn _mm512_mask_min_epu16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epu16&expand=3722)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu16&expand=3722)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -1971,7 +1971,7 @@ pub unsafe fn _mm512_maskz_min_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epu16&expand=3718)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu16&expand=3718)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -1982,7 +1982,7 @@ pub unsafe fn _mm256_mask_min_epu16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epu16&expand=3719)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu16&expand=3719)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -1994,7 +1994,7 @@ pub unsafe fn _mm256_maskz_min_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epu16&expand=3715)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu16&expand=3715)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuw))]
@@ -2005,7 +2005,7 @@ pub unsafe fn _mm_mask_min_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed unsigned 16-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epu16&expand=3716)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu16&expand=3716)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuw))]
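
The `min` families mirror `max` exactly, swapping only the selection rule. A per-lane sketch of the writemask form (illustrative):

// Per-lane model of _mm512_mask_min_epu16.
fn mask_min_epu16_lane(src: u16, k_bit: bool, a: u16, b: u16) -> u16 {
    if k_bit { a.min(b) } else { src } // writemask: copy from src
}
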
@@ -2017,7 +2017,7 @@ pub unsafe fn _mm_maskz_min_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epu8&expand=3750)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu8&expand=3750)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2027,7 +2027,7 @@ pub unsafe fn _mm512_min_epu8(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epu8&expand=3748)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu8&expand=3748)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2038,7 +2038,7 @@ pub unsafe fn _mm512_mask_min_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epu8&expand=3749)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu8&expand=3749)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2050,7 +2050,7 @@ pub unsafe fn _mm512_maskz_min_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epu8&expand=3745)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu8&expand=3745)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2061,7 +2061,7 @@ pub unsafe fn _mm256_mask_min_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epu8&expand=3746)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu8&expand=3746)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2073,7 +2073,7 @@ pub unsafe fn _mm256_maskz_min_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epu8&expand=3742)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu8&expand=3742)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminub))]
@@ -2084,7 +2084,7 @@ pub unsafe fn _mm_mask_min_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Compare packed unsigned 8-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epu8&expand=3743)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu8&expand=3743)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminub))]
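
For completeness, a usage sketch calling the real intrinsics; it assumes an x86-64 target, a toolchain where the AVX-512 intrinsics are available, and that the caller has first verified support via `is_x86_feature_detected!("avx512bw")`:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512bw")]
unsafe fn min_epu8_demo() -> bool {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi8(7);
    let b = _mm512_set1_epi8(3);
    // Reinterpret the 512-bit result as 64 u8 lanes to inspect it.
    let lanes: [u8; 64] = core::mem::transmute(_mm512_min_epu8(a, b));
    lanes.iter().all(|&x| x == 3)
}
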
@@ -2096,7 +2096,7 @@ pub unsafe fn _mm_maskz_min_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epi16&expand=3687)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi16&expand=3687)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -2106,7 +2106,7 @@ pub unsafe fn _mm512_min_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epi16&expand=3685)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi16&expand=3685)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -2117,7 +2117,7 @@ pub unsafe fn _mm512_mask_min_epi16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epi16&expand=3686)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi16&expand=3686)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -2129,7 +2129,7 @@ pub unsafe fn _mm512_maskz_min_epi16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epi16&expand=3682)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi16&expand=3682)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -2140,7 +2140,7 @@ pub unsafe fn _mm256_mask_min_epi16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epi16&expand=3683)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi16&expand=3683)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -2152,7 +2152,7 @@ pub unsafe fn _mm256_maskz_min_epi16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epi16&expand=3679)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epi16&expand=3679)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsw))]
@@ -2163,7 +2163,7 @@ pub unsafe fn _mm_mask_min_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed signed 16-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epi16&expand=3680)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epi16&expand=3680)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsw))]
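
As with max, signed and unsigned min diverge exactly where the sign bit flips the ordering: 0x8000 is 32768 as a u16 but -32768 as an i16. A scalar sketch:

// Same bits, different winner under signed vs. unsigned min.
fn demo_epi16_vs_epu16_min() {
    let (x, y) = (0x8000_u16, 0x7FFF_u16);
    assert_eq!(x.min(y), 0x7FFF);                 // epu16-style: 32767 < 32768
    assert_eq!((x as i16).min(y as i16), -32768); // epi16-style: -32768 < 32767
}
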
@@ -2175,7 +2175,7 @@ pub unsafe fn _mm_maskz_min_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epi8&expand=3714)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi8&expand=3714)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -2185,7 +2185,7 @@ pub unsafe fn _mm512_min_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epi8&expand=3712)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi8&expand=3712)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -2196,7 +2196,7 @@ pub unsafe fn _mm512_mask_min_epi8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epi8&expand=3713)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi8&expand=3713)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -2208,7 +2208,7 @@ pub unsafe fn _mm512_maskz_min_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epi8&expand=3709)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi8&expand=3709)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -2219,7 +2219,7 @@ pub unsafe fn _mm256_mask_min_epi8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epi8&expand=3710)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi8&expand=3710)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -2231,7 +2231,7 @@ pub unsafe fn _mm256_maskz_min_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epi8&expand=3706)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epi8&expand=3706)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsb))]
@@ -2242,7 +2242,7 @@ pub unsafe fn _mm_mask_min_epi8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Compare packed signed 8-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epi8&expand=3707)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epi8&expand=3707)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsb))]
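
The 256-bit signed-byte writemask variant follows the same pattern with 32 lanes and a `__mmask32`. A whole-vector model in array form (for illustration):

// Model of _mm256_mask_min_epi8: bit i of k selects lane i.
fn mask_min_epi8_model(src: [i8; 32], k: u32, a: [i8; 32], b: [i8; 32]) -> [i8; 32] {
    core::array::from_fn(|i| if (k >> i) & 1 == 1 { a[i].min(b[i]) } else { src[i] })
}
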
@@ -2254,7 +2254,7 @@ pub unsafe fn _mm_maskz_min_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Compare packed unsigned 16-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_cmplt_epu16_mask&expand=1050)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epu16_mask&expand=1050)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2264,7 +2264,7 @@ pub unsafe fn _mm512_cmplt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed unsigned 16-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epu16_mask&expand=1051)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu16_mask&expand=1051)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2274,7 +2274,7 @@ pub unsafe fn _mm512_mask_cmplt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed unsigned 16-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_cmplt_epu16_mask&expand=1050)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu16_mask&expand=1050)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2284,7 +2284,7 @@ pub unsafe fn _mm256_cmplt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed unsigned 16-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epu16_mask&expand=1049)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu16_mask&expand=1049)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2294,7 +2294,7 @@ pub unsafe fn _mm256_mask_cmplt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed unsigned 16-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi16_mask&expand=1018)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu16_mask&expand=1018)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2304,7 +2304,7 @@ pub unsafe fn _mm_cmplt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 16-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epi16_mask&expand=1019)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu16_mask&expand=1019)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
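
The `cmplt` family inverts the data flow of the operations above: instead of consuming a mask it produces one, with bit i of the result reporting the comparison for lane i (32 u16 lanes yield a `__mmask32`). A model over arrays (for illustration):

// Model of _mm512_cmplt_epu16_mask: one result bit per lane.
fn cmplt_epu16_mask_model(a: [u16; 32], b: [u16; 32]) -> u32 {
    let mut k = 0_u32;
    for i in 0..32 {
        if a[i] < b[i] {
            k |= 1 << i;
        }
    }
    k
}
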
@@ -2314,7 +2314,7 @@ pub unsafe fn _mm_mask_cmplt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 8-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm512_cmplt_epu8_mask&expand=1068)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epu8_mask&expand=1068)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2324,7 +2324,7 @@ pub unsafe fn _mm512_cmplt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed unsigned 8-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epu8_mask&expand=1069)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu8_mask&expand=1069)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2334,7 +2334,7 @@ pub unsafe fn _mm512_mask_cmplt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed unsigned 8-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epu8_mask&expand=1066)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu8_mask&expand=1066)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2344,7 +2344,7 @@ pub unsafe fn _mm256_cmplt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed unsigned 8-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epu8_mask&expand=1067)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu8_mask&expand=1067)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2354,7 +2354,7 @@ pub unsafe fn _mm256_mask_cmplt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed unsigned 8-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epu8_mask&expand=1064)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu8_mask&expand=1064)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2364,7 +2364,7 @@ pub unsafe fn _mm_cmplt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed unsigned 8-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epu8_mask&expand=1065)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu8_mask&expand=1065)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2374,7 +2374,7 @@ pub unsafe fn _mm_mask_cmplt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed signed 16-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_epi16_mask&expand=1022)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi16_mask&expand=1022)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2384,7 +2384,7 @@ pub unsafe fn _mm512_cmplt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed signed 16-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epi16_mask&expand=1023)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi16_mask&expand=1023)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2394,7 +2394,7 @@ pub unsafe fn _mm512_mask_cmplt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed signed 16-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epi16_mask&expand=1020)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi16_mask&expand=1020)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2404,7 +2404,7 @@ pub unsafe fn _mm256_cmplt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed signed 16-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epi16_mask&expand=1021)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi16_mask&expand=1021)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2414,7 +2414,7 @@ pub unsafe fn _mm256_mask_cmplt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed signed 16-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi16_mask&expand=1018)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16_mask&expand=1018)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2424,7 +2424,7 @@ pub unsafe fn _mm_cmplt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 16-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epi16_mask&expand=1019)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi16_mask&expand=1019)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2434,7 +2434,7 @@ pub unsafe fn _mm_mask_cmplt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 8-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_epi8_mask&expand=1044)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi8_mask&expand=1044)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2444,7 +2444,7 @@ pub unsafe fn _mm512_cmplt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed signed 8-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epi8_mask&expand=1045)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi8_mask&expand=1045)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2454,7 +2454,7 @@ pub unsafe fn _mm512_mask_cmplt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed signed 8-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epi8_mask&expand=1042)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi8_mask&expand=1042)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2464,7 +2464,7 @@ pub unsafe fn _mm256_cmplt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed signed 8-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epi8_mask&expand=1043)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi8_mask&expand=1043)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2474,7 +2474,7 @@ pub unsafe fn _mm256_mask_cmplt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed signed 8-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi8_mask&expand=1040)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi8_mask&expand=1040)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2484,7 +2484,7 @@ pub unsafe fn _mm_cmplt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed signed 8-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epi8_mask&expand=1041)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi8_mask&expand=1041)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
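
The epi/epu pairs above differ only in whether lane bits are read as signed or unsigned before the less-than test. A small sketch of where that matters, assuming avx512bw and avx512vl are detectable at runtime (the function name is illustrative):

```rust
#[cfg(target_arch = "x86_64")]
fn signedness_demo() {
    if is_x86_feature_detected!("avx512bw") && is_x86_feature_detected!("avx512vl") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_set1_epi8(-128); // bit pattern 0x80 in every lane
            let b = _mm_set1_epi8(1);
            // Signed: -128 < 1 holds in every lane.
            assert_eq!(_mm_cmplt_epi8_mask(a, b), u16::MAX);
            // Unsigned: the same bits read as 128, and 128 < 1 in no lane.
            assert_eq!(_mm_cmplt_epu8_mask(a, b), 0);
        }
    }
}
```
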
@@ -2494,7 +2494,7 @@ pub unsafe fn _mm_mask_cmplt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 16-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epu16_mask&expand=927)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu16_mask&expand=927)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2504,7 +2504,7 @@ pub unsafe fn _mm512_cmpgt_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed unsigned 16-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epu16_mask&expand=928)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu16_mask&expand=928)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2514,7 +2514,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed unsigned 16-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epu16_mask&expand=925)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu16_mask&expand=925)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2524,7 +2524,7 @@ pub unsafe fn _mm256_cmpgt_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed unsigned 16-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epu16_mask&expand=926)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu16_mask&expand=926)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2534,7 +2534,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed unsigned 16-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epu16_mask&expand=923)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu16_mask&expand=923)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2544,7 +2544,7 @@ pub unsafe fn _mm_cmpgt_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 16-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epu16_mask&expand=924)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu16_mask&expand=924)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2554,7 +2554,7 @@ pub unsafe fn _mm_mask_cmpgt_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 8-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epu8_mask&expand=945)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu8_mask&expand=945)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2564,7 +2564,7 @@ pub unsafe fn _mm512_cmpgt_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed unsigned 8-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epu8_mask&expand=946)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu8_mask&expand=946)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2574,7 +2574,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed unsigned 8-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epu8_mask&expand=943)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu8_mask&expand=943)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2584,7 +2584,7 @@ pub unsafe fn _mm256_cmpgt_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed unsigned 8-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epu8_mask&expand=944)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu8_mask&expand=944)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2594,7 +2594,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed unsigned 8-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epu8_mask&expand=941)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu8_mask&expand=941)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2604,7 +2604,7 @@ pub unsafe fn _mm_cmpgt_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed unsigned 8-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epu8_mask&expand=942)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu8_mask&expand=942)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2614,7 +2614,7 @@ pub unsafe fn _mm_mask_cmpgt_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed signed 16-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epi16_mask&expand=897)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi16_mask&expand=897)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2624,7 +2624,7 @@ pub unsafe fn _mm512_cmpgt_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed signed 16-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epi16_mask&expand=898)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi16_mask&expand=898)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2634,7 +2634,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed signed 16-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi16_mask&expand=895)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi16_mask&expand=895)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2644,7 +2644,7 @@ pub unsafe fn _mm256_cmpgt_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed signed 16-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epi16_mask&expand=896)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi16_mask&expand=896)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2654,7 +2654,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed signed 16-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi16_mask&expand=893)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi16_mask&expand=893)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2664,7 +2664,7 @@ pub unsafe fn _mm_cmpgt_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 16-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epi16_mask&expand=894)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi16_mask&expand=894)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2674,7 +2674,7 @@ pub unsafe fn _mm_mask_cmpgt_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 8-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epi8_mask&expand=921)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi8_mask&expand=921)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2684,7 +2684,7 @@ pub unsafe fn _mm512_cmpgt_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed signed 8-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epi8_mask&expand=922)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi8_mask&expand=922)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2694,7 +2694,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed signed 8-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi8_mask&expand=919)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi8_mask&expand=919)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2704,7 +2704,7 @@ pub unsafe fn _mm256_cmpgt_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed signed 8-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epi8_mask&expand=920)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi8_mask&expand=920)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2714,7 +2714,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed signed 8-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi8_mask&expand=917)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi8_mask&expand=917)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2724,7 +2724,7 @@ pub unsafe fn _mm_cmpgt_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed signed 8-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epi8_mask&expand=918)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi8_mask&expand=918)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
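
Since __mmask8/16/32/64 are plain integer aliases (u8/u16/u32/u64), the masks these greater-than intrinsics return compose with ordinary integer operations; counting matching lanes with count_ones is a common pattern. A sketch under the assumption that the caller has verified avx512f and avx512bw (the function name is illustrative):

```rust
// Caller must ensure avx512f and avx512bw are available
// (e.g. via is_x86_feature_detected!).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn count_gt_100() -> u32 {
    use std::arch::x86_64::*;
    let v = _mm512_set1_epi8(200u8 as i8); // 64 byte lanes of 200
    let t = _mm512_set1_epi8(100);         // 64 byte lanes of 100
    let m: __mmask64 = _mm512_cmpgt_epu8_mask(v, t);
    m.count_ones() // 64: unsigned 200 > 100 in every lane
}
```
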
@@ -2734,7 +2734,7 @@ pub unsafe fn _mm_mask_cmpgt_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epu16_mask&expand=989)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu16_mask&expand=989)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2744,7 +2744,7 @@ pub unsafe fn _mm512_cmple_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed unsigned 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epu16_mask&expand=990)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu16_mask&expand=990)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2754,7 +2754,7 @@ pub unsafe fn _mm512_mask_cmple_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed unsigned 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epu16_mask&expand=987)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu16_mask&expand=987)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2764,7 +2764,7 @@ pub unsafe fn _mm256_cmple_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed unsigned 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epu16_mask&expand=988)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu16_mask&expand=988)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2774,7 +2774,7 @@ pub unsafe fn _mm256_mask_cmple_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed unsigned 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epu16_mask&expand=985)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu16_mask&expand=985)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2784,7 +2784,7 @@ pub unsafe fn _mm_cmple_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epu16_mask&expand=986)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu16_mask&expand=986)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2794,7 +2794,7 @@ pub unsafe fn _mm_mask_cmple_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epu8_mask&expand=1007)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu8_mask&expand=1007)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2804,7 +2804,7 @@ pub unsafe fn _mm512_cmple_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed unsigned 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epu8_mask&expand=1008)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu8_mask&expand=1008)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2814,7 +2814,7 @@ pub unsafe fn _mm512_mask_cmple_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed unsigned 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epu8_mask&expand=1005)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu8_mask&expand=1005)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2824,7 +2824,7 @@ pub unsafe fn _mm256_cmple_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed unsigned 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epu8_mask&expand=1006)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu8_mask&expand=1006)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2834,7 +2834,7 @@ pub unsafe fn _mm256_mask_cmple_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed unsigned 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epu8_mask&expand=1003)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu8_mask&expand=1003)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2844,7 +2844,7 @@ pub unsafe fn _mm_cmple_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed unsigned 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epu8_mask&expand=1004)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu8_mask&expand=1004)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2854,7 +2854,7 @@ pub unsafe fn _mm_mask_cmple_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed signed 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epi16_mask&expand=965)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi16_mask&expand=965)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2864,7 +2864,7 @@ pub unsafe fn _mm512_cmple_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed signed 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epi16_mask&expand=966)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi16_mask&expand=966)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2874,7 +2874,7 @@ pub unsafe fn _mm512_mask_cmple_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed signed 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epi16_mask&expand=963)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi16_mask&expand=963)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2884,7 +2884,7 @@ pub unsafe fn _mm256_cmple_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed signed 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epi16_mask&expand=964)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi16_mask&expand=964)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2894,7 +2894,7 @@ pub unsafe fn _mm256_mask_cmple_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed signed 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epi16_mask&expand=961)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi16_mask&expand=961)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2904,7 +2904,7 @@ pub unsafe fn _mm_cmple_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 16-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epi16_mask&expand=962)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi16_mask&expand=962)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2914,7 +2914,7 @@ pub unsafe fn _mm_mask_cmple_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epi8_mask&expand=983)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi8_mask&expand=983)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2924,7 +2924,7 @@ pub unsafe fn _mm512_cmple_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed signed 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epi8_mask&expand=984)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi8_mask&expand=984)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2934,7 +2934,7 @@ pub unsafe fn _mm512_mask_cmple_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed signed 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epi8_mask&expand=981)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi8_mask&expand=981)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2944,7 +2944,7 @@ pub unsafe fn _mm256_cmple_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed signed 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epi8_mask&expand=982)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi8_mask&expand=982)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2954,7 +2954,7 @@ pub unsafe fn _mm256_mask_cmple_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed signed 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epi8_mask&expand=979)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi8_mask&expand=979)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2964,7 +2964,7 @@ pub unsafe fn _mm_cmple_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed signed 8-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epi8_mask&expand=980)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi8_mask&expand=980)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
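
The less-than-or-equal masks combine naturally into inclusive range tests by ANDing two compares. A minimal sketch, again assuming avx512bw and with an illustrative helper name:

```rust
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

// Mask of the lanes of x inside [lo, hi], treating lanes as unsigned 16-bit.
// Caller must ensure avx512bw is available.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512bw")]
unsafe fn lanes_in_range(x: __m512i, lo: __m512i, hi: __m512i) -> __mmask32 {
    _mm512_cmple_epu16_mask(lo, x) & _mm512_cmple_epu16_mask(x, hi)
}
```
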
@@ -2974,7 +2974,7 @@ pub unsafe fn _mm_mask_cmple_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epu16_mask&expand=867)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu16_mask&expand=867)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2984,7 +2984,7 @@ pub unsafe fn _mm512_cmpge_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed unsigned 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epu16_mask&expand=868)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu16_mask&expand=868)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -2994,7 +2994,7 @@ pub unsafe fn _mm512_mask_cmpge_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed unsigned 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epu16_mask&expand=865)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu16_mask&expand=865)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3004,7 +3004,7 @@ pub unsafe fn _mm256_cmpge_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed unsigned 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epu16_mask&expand=866)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu16_mask&expand=866)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3014,7 +3014,7 @@ pub unsafe fn _mm256_mask_cmpge_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed unsigned 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epu16_mask&expand=863)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu16_mask&expand=863)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3024,7 +3024,7 @@ pub unsafe fn _mm_cmpge_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epu16_mask&expand=864)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu16_mask&expand=864)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3034,7 +3034,7 @@ pub unsafe fn _mm_mask_cmpge_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epu8_mask&expand=885)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu8_mask&expand=885)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3044,7 +3044,7 @@ pub unsafe fn _mm512_cmpge_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed unsigned 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epu8_mask&expand=886)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu8_mask&expand=886)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3054,7 +3054,7 @@ pub unsafe fn _mm512_mask_cmpge_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed unsigned 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epu8_mask&expand=883)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu8_mask&expand=883)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3064,7 +3064,7 @@ pub unsafe fn _mm256_cmpge_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed unsigned 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epu8_mask&expand=884)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu8_mask&expand=884)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3074,7 +3074,7 @@ pub unsafe fn _mm256_mask_cmpge_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed unsigned 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epu8_mask&expand=881)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu8_mask&expand=881)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3084,7 +3084,7 @@ pub unsafe fn _mm_cmpge_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed unsigned 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epu8_mask&expand=882)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu8_mask&expand=882)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3094,7 +3094,7 @@ pub unsafe fn _mm_mask_cmpge_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed signed 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epi16_mask&expand=843)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi16_mask&expand=843)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3104,7 +3104,7 @@ pub unsafe fn _mm512_cmpge_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed signed 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epi16_mask&expand=844)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi16_mask&expand=844)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3114,7 +3114,7 @@ pub unsafe fn _mm512_mask_cmpge_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed signed 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epi16_mask&expand=841)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi16_mask&expand=841)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3124,7 +3124,7 @@ pub unsafe fn _mm256_cmpge_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed signed 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epi16_mask&expand=842)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi16_mask&expand=842)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3134,7 +3134,7 @@ pub unsafe fn _mm256_mask_cmpge_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed signed 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epi16_mask&expand=839)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi16_mask&expand=839)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3144,7 +3144,7 @@ pub unsafe fn _mm_cmpge_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 16-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epi16_mask&expand=840)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi16_mask&expand=840)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3154,7 +3154,7 @@ pub unsafe fn _mm_mask_cmpge_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epi8_mask&expand=861)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi8_mask&expand=861)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3164,7 +3164,7 @@ pub unsafe fn _mm512_cmpge_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed signed 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epi8_mask&expand=862)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi8_mask&expand=862)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3174,7 +3174,7 @@ pub unsafe fn _mm512_mask_cmpge_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed signed 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epi8_mask&expand=859)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi8_mask&expand=859)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3184,7 +3184,7 @@ pub unsafe fn _mm256_cmpge_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed signed 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epi8_mask&expand=860)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi8_mask&expand=860)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3194,7 +3194,7 @@ pub unsafe fn _mm256_mask_cmpge_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed signed 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epi8_mask&expand=857)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi8_mask&expand=857)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3204,7 +3204,7 @@ pub unsafe fn _mm_cmpge_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed signed 8-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epi8_mask&expand=858)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi8_mask&expand=858)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
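
The assert_instr(vpcmp) annotations repeated through this file reflect that every named comparison here lowers to vpcmp/vpcmpu with a different immediate predicate; for integers, greater-than-or-equal is the not-less-than (NLT) predicate. A sketch of that equivalence, assuming the const-generic _mm512_cmp_epu16_mask and the _MM_CMPINT_NLT constant exposed by std::arch::x86_64 (the function name is illustrative):

```rust
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

// The named ge comparison and the generic vpcmp with the NLT predicate
// should produce the same mask. Caller must ensure avx512bw is available.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512bw")]
unsafe fn ge_matches_nlt(a: __m512i, b: __m512i) -> bool {
    _mm512_cmpge_epu16_mask(a, b) == _mm512_cmp_epu16_mask::<_MM_CMPINT_NLT>(a, b)
}
```
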
@@ -3214,7 +3214,7 @@ pub unsafe fn _mm_mask_cmpge_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 16-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epu16_mask&expand=801)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu16_mask&expand=801)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3224,7 +3224,7 @@ pub unsafe fn _mm512_cmpeq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed unsigned 16-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epu16_mask&expand=802)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu16_mask&expand=802)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3234,7 +3234,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed unsigned 16-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epu16_mask&expand=799)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu16_mask&expand=799)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3244,7 +3244,7 @@ pub unsafe fn _mm256_cmpeq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed unsigned 16-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epu16_mask&expand=800)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu16_mask&expand=800)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3254,7 +3254,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed unsigned 16-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epu16_mask&expand=797)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu16_mask&expand=797)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3264,7 +3264,7 @@ pub unsafe fn _mm_cmpeq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 16-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epu16_mask&expand=798)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu16_mask&expand=798)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
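(Sketch, illustrative only: because `__mmask32` is a plain `u32`, ordinary integer operations such as `count_ones` apply directly to the result of an equality compare. The helper name is hypothetical.)

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn count_equal_u16(a: *const i16, b: *const i16) -> u32 {
    // Count the 16-bit lanes that compare equal across two
    // unaligned 512-bit inputs.
    let va = _mm512_loadu_epi16(a);
    let vb = _mm512_loadu_epi16(b);
    _mm512_cmpeq_epu16_mask(va, vb).count_ones()
}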
@@ -3274,7 +3274,7 @@ pub unsafe fn _mm_mask_cmpeq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 8-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epu8_mask&expand=819)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu8_mask&expand=819)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3284,7 +3284,7 @@ pub unsafe fn _mm512_cmpeq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed unsigned 8-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epu8_mask&expand=820)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu8_mask&expand=820)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3294,7 +3294,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed unsigned 8-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epu8_mask&expand=817)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu8_mask&expand=817)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3304,7 +3304,7 @@ pub unsafe fn _mm256_cmpeq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed unsigned 8-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epu8_mask&expand=818)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu8_mask&expand=818)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3314,7 +3314,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed unsigned 8-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epu8_mask&expand=815)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu8_mask&expand=815)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3324,7 +3324,7 @@ pub unsafe fn _mm_cmpeq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed unsigned 8-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epu8_mask&expand=816)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu8_mask&expand=816)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3334,7 +3334,7 @@ pub unsafe fn _mm_mask_cmpeq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed signed 16-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epi16_mask&expand=771)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi16_mask&expand=771)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3344,7 +3344,7 @@ pub unsafe fn _mm512_cmpeq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed signed 16-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epi16_mask&expand=772)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi16_mask&expand=772)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3354,7 +3354,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512i
/// Compare packed signed 16-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi16_mask&expand=769)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi16_mask&expand=769)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3364,7 +3364,7 @@ pub unsafe fn _mm256_cmpeq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed signed 16-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epi16_mask&expand=770)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi16_mask&expand=770)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3374,7 +3374,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256i
/// Compare packed signed 16-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi16_mask&expand=767)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi16_mask&expand=767)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3384,7 +3384,7 @@ pub unsafe fn _mm_cmpeq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 16-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epi16_mask&expand=768)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi16_mask&expand=768)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3394,7 +3394,7 @@ pub unsafe fn _mm_mask_cmpeq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 8-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epi8_mask&expand=795)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi8_mask&expand=795)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3404,7 +3404,7 @@ pub unsafe fn _mm512_cmpeq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed signed 8-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epi8_mask&expand=796)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi8_mask&expand=796)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3414,7 +3414,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i)
/// Compare packed signed 8-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi8_mask&expand=793)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi8_mask&expand=793)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3424,7 +3424,7 @@ pub unsafe fn _mm256_cmpeq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed signed 8-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epi8_mask&expand=794)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi8_mask&expand=794)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3434,7 +3434,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i)
/// Compare packed signed 8-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi8_mask&expand=791)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi8_mask&expand=791)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3444,7 +3444,7 @@ pub unsafe fn _mm_cmpeq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed signed 8-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epi8_mask&expand=792)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi8_mask&expand=792)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3454,7 +3454,7 @@ pub unsafe fn _mm_mask_cmpeq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 16-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epu16_mask&expand=1106)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu16_mask&expand=1106)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3464,7 +3464,7 @@ pub unsafe fn _mm512_cmpneq_epu16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed unsigned 16-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epu16_mask&expand=1107)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu16_mask&expand=1107)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3474,7 +3474,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu16_mask(k1: __mmask32, a: __m512i, b: __m512
/// Compare packed unsigned 16-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epu16_mask&expand=1104)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu16_mask&expand=1104)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3484,7 +3484,7 @@ pub unsafe fn _mm256_cmpneq_epu16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed unsigned 16-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epu16_mask&expand=1105)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu16_mask&expand=1105)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3494,7 +3494,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu16_mask(k1: __mmask16, a: __m256i, b: __m256
/// Compare packed unsigned 16-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epu16_mask&expand=1102)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu16_mask&expand=1102)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3504,7 +3504,7 @@ pub unsafe fn _mm_cmpneq_epu16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 16-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epu16_mask&expand=1103)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu16_mask&expand=1103)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3514,7 +3514,7 @@ pub unsafe fn _mm_mask_cmpneq_epu16_mask(k1: __mmask8, a: __m128i, b: __m128i) -
/// Compare packed unsigned 8-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epu8_mask&expand=1124)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu8_mask&expand=1124)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3524,7 +3524,7 @@ pub unsafe fn _mm512_cmpneq_epu8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed unsigned 8-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epu8_mask&expand=1125)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu8_mask&expand=1125)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3534,7 +3534,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu8_mask(k1: __mmask64, a: __m512i, b: __m512i
/// Compare packed unsigned 8-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epu8_mask&expand=1122)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu8_mask&expand=1122)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3544,7 +3544,7 @@ pub unsafe fn _mm256_cmpneq_epu8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed unsigned 8-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epu8_mask&expand=1123)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu8_mask&expand=1123)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3554,7 +3554,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu8_mask(k1: __mmask32, a: __m256i, b: __m256i
/// Compare packed unsigned 8-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epu8_mask&expand=1120)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu8_mask&expand=1120)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3564,7 +3564,7 @@ pub unsafe fn _mm_cmpneq_epu8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed unsigned 8-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epu8_mask&expand=1121)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu8_mask&expand=1121)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
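(Sketch, illustrative only: the not-equal compares give a cheap "do these 64 bytes differ anywhere?" test, since a nonzero mask means at least one lane compared not-equal. The helper name is hypothetical.)

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn bytes_differ(a: *const i8, b: *const i8) -> bool {
    // Both pointers must be valid for 64 bytes; alignment is irrelevant.
    _mm512_cmpneq_epu8_mask(_mm512_loadu_epi8(a), _mm512_loadu_epi8(b)) != 0
}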
@@ -3574,7 +3574,7 @@ pub unsafe fn _mm_mask_cmpneq_epu8_mask(k1: __mmask16, a: __m128i, b: __m128i) -
/// Compare packed signed 16-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epi16_mask&expand=1082)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi16_mask&expand=1082)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3584,7 +3584,7 @@ pub unsafe fn _mm512_cmpneq_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
/// Compare packed signed 16-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epi16_mask&expand=1083)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi16_mask&expand=1083)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3594,7 +3594,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi16_mask(k1: __mmask32, a: __m512i, b: __m512
/// Compare packed signed 16-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epi16_mask&expand=1080)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi16_mask&expand=1080)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3604,7 +3604,7 @@ pub unsafe fn _mm256_cmpneq_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compare packed signed 16-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epi16_mask&expand=1081)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi16_mask&expand=1081)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3614,7 +3614,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi16_mask(k1: __mmask16, a: __m256i, b: __m256
/// Compare packed signed 16-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epi16_mask&expand=1078)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi16_mask&expand=1078)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3624,7 +3624,7 @@ pub unsafe fn _mm_cmpneq_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 16-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epi16_mask&expand=1079)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi16_mask&expand=1079)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3634,7 +3634,7 @@ pub unsafe fn _mm_mask_cmpneq_epi16_mask(k1: __mmask8, a: __m128i, b: __m128i) -
/// Compare packed signed 8-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epi8_mask&expand=1100)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi8_mask&expand=1100)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3644,7 +3644,7 @@ pub unsafe fn _mm512_cmpneq_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compare packed signed 8-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epi8_mask&expand=1101)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi8_mask&expand=1101)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3654,7 +3654,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi8_mask(k1: __mmask64, a: __m512i, b: __m512i
/// Compare packed signed 8-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epi8_mask&expand=1098)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi8_mask&expand=1098)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3664,7 +3664,7 @@ pub unsafe fn _mm256_cmpneq_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compare packed signed 8-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epi8_mask&expand=1099)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi8_mask&expand=1099)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3674,7 +3674,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi8_mask(k1: __mmask32, a: __m256i, b: __m256i
/// Compare packed signed 8-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epi8_mask&expand=1096)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi8_mask&expand=1096)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3684,7 +3684,7 @@ pub unsafe fn _mm_cmpneq_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compare packed signed 8-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epi8_mask&expand=1097)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi8_mask&expand=1097)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))]
@@ -3694,13 +3694,13 @@ pub unsafe fn _mm_mask_cmpneq_epi8_mask(k1: __mmask16, a: __m128i, b: __m128i) -
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by `IMM8`, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epu16_mask&expand=715)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu16_mask&expand=715)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm512_cmp_epu16_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x32();
let b = b.as_u16x32();
let r = vpcmpuw(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
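(Besides the URL change, this hunk swaps the fixed-arity `static_assert_imm3!` for the parameterized `static_assert_uimm_bits!(IMM8, 3)`, which rejects at compile time any `IMM8` that does not fit in 3 unsigned bits, i.e. anything outside the 0..=7 predicate encoding. A usage sketch follows, illustrative only, assuming the `_MM_CMPINT_*` constants exported alongside the AVX-512 intrinsics.)

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn count_lt_u16(a: *const i16, b: *const i16) -> u32 {
    let va = _mm512_loadu_epi16(a);
    let vb = _mm512_loadu_epi16(b);
    // _MM_CMPINT_LT (1) selects unsigned less-than; an out-of-range
    // predicate such as 8 would fail static_assert_uimm_bits! at build time.
    _mm512_cmp_epu16_mask::<_MM_CMPINT_LT>(va, vb).count_ones()
}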
@@ -3709,7 +3709,7 @@ pub unsafe fn _mm512_cmp_epu16_mask<const IMM8: i32>(a: __m512i, b: __m512i) ->
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epu16_mask&expand=716)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu16_mask&expand=716)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(3)]
@@ -3719,7 +3719,7 @@ pub unsafe fn _mm512_mask_cmp_epu16_mask<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x32();
let b = b.as_u16x32();
let r = vpcmpuw(a, b, IMM8, k1);
@@ -3728,13 +3728,13 @@ pub unsafe fn _mm512_mask_cmp_epu16_mask<const IMM8: i32>(
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epu16_mask&expand=713)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu16_mask&expand=713)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm256_cmp_epu16_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x16();
let b = b.as_u16x16();
let r = vpcmpuw256(a, b, IMM8, 0b11111111_11111111);
@@ -3743,7 +3743,7 @@ pub unsafe fn _mm256_cmp_epu16_mask<const IMM8: i32>(a: __m256i, b: __m256i) ->
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epu16_mask&expand=714)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu16_mask&expand=714)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -3753,7 +3753,7 @@ pub unsafe fn _mm256_mask_cmp_epu16_mask<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x16();
let b = b.as_u16x16();
let r = vpcmpuw256(a, b, IMM8, k1);
@@ -3762,13 +3762,13 @@ pub unsafe fn _mm256_mask_cmp_epu16_mask<const IMM8: i32>(
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epu16_mask&expand=711)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu16_mask&expand=711)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_epu16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask8 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x8();
let b = b.as_u16x8();
let r = vpcmpuw128(a, b, IMM8, 0b11111111);
@@ -3777,7 +3777,7 @@ pub unsafe fn _mm_cmp_epu16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __m
/// Compare packed unsigned 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epu16_mask&expand=712)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu16_mask&expand=712)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -3787,7 +3787,7 @@ pub unsafe fn _mm_mask_cmp_epu16_mask<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __mmask8 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u16x8();
let b = b.as_u16x8();
let r = vpcmpuw128(a, b, IMM8, k1);
@@ -3796,13 +3796,13 @@ pub unsafe fn _mm_mask_cmp_epu16_mask<const IMM8: i32>(
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epu8_mask&expand=733)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu8_mask&expand=733)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm512_cmp_epu8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask64 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x64();
let b = b.as_u8x64();
let r = vpcmpub(
@@ -3816,7 +3816,7 @@ pub unsafe fn _mm512_cmp_epu8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> _
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epu8_mask&expand=734)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu8_mask&expand=734)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(3)]
@@ -3826,7 +3826,7 @@ pub unsafe fn _mm512_mask_cmp_epu8_mask<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __mmask64 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x64();
let b = b.as_u8x64();
let r = vpcmpub(a, b, IMM8, k1);
@@ -3835,13 +3835,13 @@ pub unsafe fn _mm512_mask_cmp_epu8_mask<const IMM8: i32>(
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epu8_mask&expand=731)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu8_mask&expand=731)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm256_cmp_epu8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x32();
let b = b.as_u8x32();
let r = vpcmpub256(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
@@ -3850,7 +3850,7 @@ pub unsafe fn _mm256_cmp_epu8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> _
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epu8_mask&expand=732)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu8_mask&expand=732)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -3860,7 +3860,7 @@ pub unsafe fn _mm256_mask_cmp_epu8_mask<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x32();
let b = b.as_u8x32();
let r = vpcmpub256(a, b, IMM8, k1);
@@ -3869,13 +3869,13 @@ pub unsafe fn _mm256_mask_cmp_epu8_mask<const IMM8: i32>(
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epu8_mask&expand=729)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu8_mask&expand=729)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_epu8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x16();
let b = b.as_u8x16();
let r = vpcmpub128(a, b, IMM8, 0b11111111_11111111);
@@ -3884,7 +3884,7 @@ pub unsafe fn _mm_cmp_epu8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mm
/// Compare packed unsigned 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epu8_mask&expand=730)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu8_mask&expand=730)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -3894,7 +3894,7 @@ pub unsafe fn _mm_mask_cmp_epu8_mask<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_u8x16();
let b = b.as_u8x16();
let r = vpcmpub128(a, b, IMM8, k1);
@@ -3903,13 +3903,13 @@ pub unsafe fn _mm_mask_cmp_epu8_mask<const IMM8: i32>(
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epi16_mask&expand=691)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi16_mask&expand=691)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm512_cmp_epi16_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x32();
let b = b.as_i16x32();
let r = vpcmpw(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
@@ -3918,7 +3918,7 @@ pub unsafe fn _mm512_cmp_epi16_mask<const IMM8: i32>(a: __m512i, b: __m512i) ->
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epi16_mask&expand=692)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi16_mask&expand=692)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(3)]
@@ -3928,7 +3928,7 @@ pub unsafe fn _mm512_mask_cmp_epi16_mask<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x32();
let b = b.as_i16x32();
let r = vpcmpw(a, b, IMM8, k1);
@@ -3937,13 +3937,13 @@ pub unsafe fn _mm512_mask_cmp_epi16_mask<const IMM8: i32>(
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epi16_mask&expand=689)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi16_mask&expand=689)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm256_cmp_epi16_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x16();
let b = b.as_i16x16();
let r = vpcmpw256(a, b, IMM8, 0b11111111_11111111);
@@ -3952,7 +3952,7 @@ pub unsafe fn _mm256_cmp_epi16_mask<const IMM8: i32>(a: __m256i, b: __m256i) ->
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epi16_mask&expand=690)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi16_mask&expand=690)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -3962,7 +3962,7 @@ pub unsafe fn _mm256_mask_cmp_epi16_mask<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x16();
let b = b.as_i16x16();
let r = vpcmpw256(a, b, IMM8, k1);
@@ -3971,13 +3971,13 @@ pub unsafe fn _mm256_mask_cmp_epi16_mask<const IMM8: i32>(
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epi16_mask&expand=687)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi16_mask&expand=687)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_epi16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask8 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x8();
let b = b.as_i16x8();
let r = vpcmpw128(a, b, IMM8, 0b11111111);
@@ -3986,7 +3986,7 @@ pub unsafe fn _mm_cmp_epi16_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __m
/// Compare packed signed 16-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epi16_mask&expand=688)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi16_mask&expand=688)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -3996,7 +3996,7 @@ pub unsafe fn _mm_mask_cmp_epi16_mask<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __mmask8 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i16x8();
let b = b.as_i16x8();
let r = vpcmpw128(a, b, IMM8, k1);
@@ -4005,13 +4005,13 @@ pub unsafe fn _mm_mask_cmp_epi16_mask<const IMM8: i32>(
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epi8_mask&expand=709)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi8_mask&expand=709)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm512_cmp_epi8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> __mmask64 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x64();
let b = b.as_i8x64();
let r = vpcmpb(
@@ -4025,7 +4025,7 @@ pub unsafe fn _mm512_cmp_epi8_mask<const IMM8: i32>(a: __m512i, b: __m512i) -> _
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epi8_mask&expand=710)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi8_mask&expand=710)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(3)]
@@ -4035,7 +4035,7 @@ pub unsafe fn _mm512_mask_cmp_epi8_mask<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __mmask64 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x64();
let b = b.as_i8x64();
let r = vpcmpb(a, b, IMM8, k1);
@@ -4044,13 +4044,13 @@ pub unsafe fn _mm512_mask_cmp_epi8_mask<const IMM8: i32>(
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epi8_mask&expand=707)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi8_mask&expand=707)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm256_cmp_epi8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x32();
let b = b.as_i8x32();
let r = vpcmpb256(a, b, IMM8, 0b11111111_11111111_11111111_11111111);
@@ -4059,7 +4059,7 @@ pub unsafe fn _mm256_cmp_epi8_mask<const IMM8: i32>(a: __m256i, b: __m256i) -> _
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epi8_mask&expand=708)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi8_mask&expand=708)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -4069,7 +4069,7 @@ pub unsafe fn _mm256_mask_cmp_epi8_mask<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __mmask32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x32();
let b = b.as_i8x32();
let r = vpcmpb256(a, b, IMM8, k1);
@@ -4078,13 +4078,13 @@ pub unsafe fn _mm256_mask_cmp_epi8_mask<const IMM8: i32>(
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epi8_mask&expand=705)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi8_mask&expand=705)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_epi8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x16();
let b = b.as_i8x16();
let r = vpcmpb128(a, b, IMM8, 0b11111111_11111111);
@@ -4093,7 +4093,7 @@ pub unsafe fn _mm_cmp_epi8_mask<const IMM8: i32>(a: __m128i, b: __m128i) -> __mm
/// Compare packed signed 8-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epi8_mask&expand=706)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi8_mask&expand=706)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -4103,7 +4103,7 @@ pub unsafe fn _mm_mask_cmp_epi8_mask<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __mmask16 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
let a = a.as_i8x16();
let b = b.as_i8x16();
let r = vpcmpb128(a, b, IMM8, k1);
@@ -4112,7 +4112,7 @@ pub unsafe fn _mm_mask_cmp_epi8_mask<const IMM8: i32>(
/// Load 512-bits (composed of 32 packed 16-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_epi16&expand=3368)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi16&expand=3368)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16
@@ -4122,7 +4122,7 @@ pub unsafe fn _mm512_loadu_epi16(mem_addr: *const i16) -> __m512i {
/// Load 256-bits (composed of 16 packed 16-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_epi16&expand=3365)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi16&expand=3365)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16
@@ -4132,7 +4132,7 @@ pub unsafe fn _mm256_loadu_epi16(mem_addr: *const i16) -> __m256i {
/// Load 128-bits (composed of 8 packed 16-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_epi16&expand=3362)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi16&expand=3362)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16
@@ -4142,7 +4142,7 @@ pub unsafe fn _mm_loadu_epi16(mem_addr: *const i16) -> __m128i {
/// Load 512-bits (composed of 64 packed 8-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_epi8&expand=3395)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi8&expand=3395)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8
@@ -4152,7 +4152,7 @@ pub unsafe fn _mm512_loadu_epi8(mem_addr: *const i8) -> __m512i {
/// Load 256-bits (composed of 32 packed 8-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_epi8&expand=3392)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi8&expand=3392)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8
@@ -4162,7 +4162,7 @@ pub unsafe fn _mm256_loadu_epi8(mem_addr: *const i8) -> __m256i {
/// Load 128-bits (composed of 16 packed 8-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_epi8&expand=3389)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi8&expand=3389)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8
@@ -4172,7 +4172,7 @@ pub unsafe fn _mm_loadu_epi8(mem_addr: *const i8) -> __m128i {
/// Store 512-bits (composed of 32 packed 16-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_epi16&expand=5622)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi16&expand=5622)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16
@@ -4182,7 +4182,7 @@ pub unsafe fn _mm512_storeu_epi16(mem_addr: *mut i16, a: __m512i) {
/// Store 256-bits (composed of 16 packed 16-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_epi16&expand=5620)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi16&expand=5620)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16
@@ -4192,7 +4192,7 @@ pub unsafe fn _mm256_storeu_epi16(mem_addr: *mut i16, a: __m256i) {
/// Store 128-bits (composed of 8 packed 16-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_epi16&expand=5618)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi16&expand=5618)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu16
@@ -4202,7 +4202,7 @@ pub unsafe fn _mm_storeu_epi16(mem_addr: *mut i16, a: __m128i) {
/// Store 512-bits (composed of 64 packed 8-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_epi8&expand=5640)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi8&expand=5640)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8
@@ -4212,7 +4212,7 @@ pub unsafe fn _mm512_storeu_epi8(mem_addr: *mut i8, a: __m512i) {
/// Store 256-bits (composed of 32 packed 8-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_epi8&expand=5638)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi8&expand=5638)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8
@@ -4222,7 +4222,7 @@ pub unsafe fn _mm256_storeu_epi8(mem_addr: *mut i8, a: __m256i) {
/// Store 128-bits (composed of 16 packed 8-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_epi8&expand=5636)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi8&expand=5636)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu8
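A matching sketch for the unaligned stores (same assumptions; `splat_bytes` is a made-up name):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn splat_bytes(out: &mut [i8; 64]) {
    use core::arch::x86_64::*;
    // mem_addr may be unaligned, but must be valid for a full 512-bit write.
    _mm512_storeu_epi8(out.as_mut_ptr(), _mm512_set1_epi8(7));
}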
@@ -4234,7 +4234,7 @@ pub unsafe fn _mm_storeu_epi8(mem_addr: *mut i8, a: __m128i) {
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw")]
pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *const i16) -> __m512i {
@@ -4253,7 +4253,7 @@ pub unsafe fn _mm512_mask_loadu_epi16(src: __m512i, k: __mmask32, mem_addr: *con
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw")]
pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i {
@@ -4272,7 +4272,7 @@ pub unsafe fn _mm512_maskz_loadu_epi16(k: __mmask32, mem_addr: *const i16) -> __
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw")]
pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *const i8) -> __m512i {
@@ -4291,7 +4291,7 @@ pub unsafe fn _mm512_mask_loadu_epi8(src: __m512i, k: __mmask64, mem_addr: *cons
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw")]
pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m512i {
@@ -4310,7 +4310,7 @@ pub unsafe fn _mm512_maskz_loadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m5
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")]
pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *const i16) -> __m256i {
@@ -4329,7 +4329,7 @@ pub unsafe fn _mm256_mask_loadu_epi16(src: __m256i, k: __mmask16, mem_addr: *con
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")]
pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i {
@@ -4348,7 +4348,7 @@ pub unsafe fn _mm256_maskz_loadu_epi16(k: __mmask16, mem_addr: *const i16) -> __
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")]
pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *const i8) -> __m256i {
@@ -4367,7 +4367,7 @@ pub unsafe fn _mm256_mask_loadu_epi8(src: __m256i, k: __mmask32, mem_addr: *cons
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")]
pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i {
@@ -4386,7 +4386,7 @@ pub unsafe fn _mm256_maskz_loadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m2
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i16) -> __m128i {
@@ -4405,7 +4405,7 @@ pub unsafe fn _mm_mask_loadu_epi16(src: __m128i, k: __mmask8, mem_addr: *const i
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i {
@@ -4424,7 +4424,7 @@ pub unsafe fn _mm_maskz_loadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i8) -> __m128i {
@@ -4443,7 +4443,7 @@ pub unsafe fn _mm_mask_loadu_epi8(src: __m128i, k: __mmask16, mem_addr: *const i
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i {
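An illustrative sketch contrasting the writemask and zeromask load forms (same nightly-toolchain assumption as above):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn masked_loads(buf: &[i16; 32]) {
    use core::arch::x86_64::*;
    let src = _mm512_set1_epi16(-1);
    let k: __mmask32 = 0b1; // only lane 0 is read from memory
    // Lane 0 comes from buf; the other 31 lanes keep src's value.
    let merged = _mm512_mask_loadu_epi16(src, k, buf.as_ptr());
    // Lane 0 comes from buf; the other 31 lanes are zeroed.
    let zeroed = _mm512_maskz_loadu_epi16(k, buf.as_ptr());
    let _ = (merged, zeroed);
}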
@@ -4461,7 +4461,7 @@ pub unsafe fn _mm_maskz_loadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i
/// Store packed 16-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw")]
pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: __m512i) {
@@ -4477,7 +4477,7 @@ pub unsafe fn _mm512_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask32, a: _
/// Store packed 8-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw")]
pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m512i) {
@@ -4493,7 +4493,7 @@ pub unsafe fn _mm512_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask64, a: __m
/// Store packed 16-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")]
pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: __m256i) {
@@ -4509,7 +4509,7 @@ pub unsafe fn _mm256_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask16, a: _
/// Store packed 8-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx")]
pub unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m256i) {
@@ -4525,7 +4525,7 @@ pub unsafe fn _mm256_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask32, a: __m
/// Store packed 16-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m128i) {
@@ -4541,7 +4541,7 @@ pub unsafe fn _mm_mask_storeu_epi16(mem_addr: *mut i16, mask: __mmask8, a: __m12
/// Store packed 8-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128i) {
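The store side only touches memory for lanes whose mask bit is set; a minimal sketch (illustrative name):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn masked_store(out: &mut [i16; 32]) {
    use core::arch::x86_64::*;
    // Even lanes are written with 9; odd lanes of `out` are left untouched.
    _mm512_mask_storeu_epi16(out.as_mut_ptr(), 0x5555_5555, _mm512_set1_epi16(9));
}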
@@ -4556,7 +4556,7 @@ pub unsafe fn _mm_mask_storeu_epi8(mem_addr: *mut i8, mask: __mmask16, a: __m128
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_madd_epi16&expand=3511)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_madd_epi16&expand=3511)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -4566,7 +4566,7 @@ pub unsafe fn _mm512_madd_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_madd_epi16&expand=3512)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_madd_epi16&expand=3512)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -4582,7 +4582,7 @@ pub unsafe fn _mm512_mask_madd_epi16(
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_madd_epi16&expand=3513)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_madd_epi16&expand=3513)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -4594,7 +4594,7 @@ pub unsafe fn _mm512_maskz_madd_epi16(k: __mmask16, a: __m512i, b: __m512i) -> _
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_madd_epi16&expand=3509)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_madd_epi16&expand=3509)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -4605,7 +4605,7 @@ pub unsafe fn _mm256_mask_madd_epi16(src: __m256i, k: __mmask8, a: __m256i, b: _
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_madd_epi16&expand=3510)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_madd_epi16&expand=3510)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -4617,7 +4617,7 @@ pub unsafe fn _mm256_maskz_madd_epi16(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_madd_epi16&expand=3506)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_madd_epi16&expand=3506)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
@@ -4628,7 +4628,7 @@ pub unsafe fn _mm_mask_madd_epi16(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Multiply packed signed 16-bit integers in a and b, producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_madd_epi16&expand=3507)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_madd_epi16&expand=3507)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddwd))]
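To make the pairwise arithmetic concrete, a sketch (illustrative name, same assumptions):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn madd_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    // Sixteen i32 lanes, each 3*4 + 3*4 = 24.
    _mm512_madd_epi16(_mm512_set1_epi16(3), _mm512_set1_epi16(4))
}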
@@ -4640,7 +4640,7 @@ pub unsafe fn _mm_maskz_madd_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Vertically multiply each unsigned 8-bit integer from a with the corresponding signed 8-bit integer from b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maddubs_epi16&expand=3539)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maddubs_epi16&expand=3539)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -4650,7 +4650,7 @@ pub unsafe fn _mm512_maddubs_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Multiply packed unsigned 8-bit integers in a by packed signed 8-bit integers in b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_maddubs_epi16&expand=3540)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_maddubs_epi16&expand=3540)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -4666,7 +4666,7 @@ pub unsafe fn _mm512_mask_maddubs_epi16(
/// Multiply packed unsigned 8-bit integers in a by packed signed 8-bit integers in b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_maddubs_epi16&expand=3541)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_maddubs_epi16&expand=3541)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -4678,7 +4678,7 @@ pub unsafe fn _mm512_maskz_maddubs_epi16(k: __mmask32, a: __m512i, b: __m512i) -
/// Multiply packed unsigned 8-bit integers in a by packed signed 8-bit integers in b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_maddubs_epi16&expand=3537)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_maddubs_epi16&expand=3537)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -4694,7 +4694,7 @@ pub unsafe fn _mm256_mask_maddubs_epi16(
/// Multiply packed unsigned 8-bit integers in a by packed signed 8-bit integers in b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_maddubs_epi16&expand=3538)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_maddubs_epi16&expand=3538)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -4706,7 +4706,7 @@ pub unsafe fn _mm256_maskz_maddubs_epi16(k: __mmask16, a: __m256i, b: __m256i) -
/// Multiply packed unsigned 8-bit integers in a by packed signed 8-bit integers in b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_maddubs_epi16&expand=3534)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_maddubs_epi16&expand=3534)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
@@ -4717,7 +4717,7 @@ pub unsafe fn _mm_mask_maddubs_epi16(src: __m128i, k: __mmask8, a: __m128i, b: _
/// Multiply packed unsigned 8-bit integers in a by packed signed 8-bit integers in b, producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_maddubs_epi16&expand=3535)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_maddubs_epi16&expand=3535)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaddubsw))]
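Here a is read as unsigned bytes and b as signed bytes; a sketch of the mixed-signedness product (illustrative):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn maddubs_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi8(200u8 as i8); // interpreted as the unsigned value 200
    let b = _mm512_set1_epi8(-1);
    // Each i16 lane is 200*(-1) + 200*(-1) = -400 (no saturation needed here).
    _mm512_maddubs_epi16(a, b)
}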
@@ -4729,7 +4729,7 @@ pub unsafe fn _mm_maskz_maddubs_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_packs_epi32&expand=4091)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packs_epi32&expand=4091)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -4739,7 +4739,7 @@ pub unsafe fn _mm512_packs_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_packs_epi32&expand=4089)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packs_epi32&expand=4089)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -4755,7 +4755,7 @@ pub unsafe fn _mm512_mask_packs_epi32(
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_packs_epi32&expand=4090)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packs_epi32&expand=4090)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -4767,7 +4767,7 @@ pub unsafe fn _mm512_maskz_packs_epi32(k: __mmask32, a: __m512i, b: __m512i) ->
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_packs_epi32&expand=4086)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packs_epi32&expand=4086)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -4783,7 +4783,7 @@ pub unsafe fn _mm256_mask_packs_epi32(
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_packs_epi32&expand=4087)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packs_epi32&expand=4087)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -4795,7 +4795,7 @@ pub unsafe fn _mm256_maskz_packs_epi32(k: __mmask16, a: __m256i, b: __m256i) ->
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_packs_epi32&expand=4083)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packs_epi32&expand=4083)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackssdw))]
@@ -4806,7 +4806,7 @@ pub unsafe fn _mm_mask_packs_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_packs_epi32&expand=4084)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packs_epi32&expand=4084)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackssdw))]
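A sketch of the signed saturation (illustrative; results from a and b interleave per 128-bit lane):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn packs32_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi32(70_000);  // does not fit in i16: clamps to 32767
    let b = _mm512_set1_epi32(-70_000); // clamps to -32768
    _mm512_packs_epi32(a, b)
}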
@@ -4818,7 +4818,7 @@ pub unsafe fn _mm_maskz_packs_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m1
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_packs_epi16&expand=4082)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packs_epi16&expand=4082)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -4828,7 +4828,7 @@ pub unsafe fn _mm512_packs_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_packs_epi16&expand=4080)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packs_epi16&expand=4080)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -4844,7 +4844,7 @@ pub unsafe fn _mm512_mask_packs_epi16(
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_packs_epi16&expand=4081)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packs_epi16&expand=4081)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -4856,7 +4856,7 @@ pub unsafe fn _mm512_maskz_packs_epi16(k: __mmask64, a: __m512i, b: __m512i) ->
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_packs_epi16&expand=4077)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packs_epi16&expand=4077)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -4872,7 +4872,7 @@ pub unsafe fn _mm256_mask_packs_epi16(
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=#text=_mm256_maskz_packs_epi16&expand=4078)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packs_epi16&expand=4078)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -4884,7 +4884,7 @@ pub unsafe fn _mm256_maskz_packs_epi16(k: __mmask32, a: __m256i, b: __m256i) ->
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_packs_epi16&expand=4074)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packs_epi16&expand=4074)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpacksswb))]
@@ -4895,7 +4895,7 @@ pub unsafe fn _mm_mask_packs_epi16(src: __m128i, k: __mmask16, a: __m128i, b: __
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_packs_epi16&expand=4075)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packs_epi16&expand=4075)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpacksswb))]
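The same narrowing one level down, i16 to i8 (illustrative sketch):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn packs16_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi16(300);  // clamps to i8::MAX (127)
    let b = _mm512_set1_epi16(-300); // clamps to i8::MIN (-128)
    _mm512_packs_epi16(a, b)
}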
@@ -4907,7 +4907,7 @@ pub unsafe fn _mm_maskz_packs_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __m
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_packus_epi32&expand=4130)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packus_epi32&expand=4130)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackusdw))]
@@ -4917,7 +4917,7 @@ pub unsafe fn _mm512_packus_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_packus_epi32&expand=4128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packus_epi32&expand=4128)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackusdw))]
@@ -4933,7 +4933,7 @@ pub unsafe fn _mm512_mask_packus_epi32(
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_packus_epi32&expand=4129)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packus_epi32&expand=4129)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackusdw))]
@@ -4945,7 +4945,7 @@ pub unsafe fn _mm512_maskz_packus_epi32(k: __mmask32, a: __m512i, b: __m512i) ->
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_packus_epi32&expand=4125)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packus_epi32&expand=4125)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackusdw))]
@@ -4961,7 +4961,7 @@ pub unsafe fn _mm256_mask_packus_epi32(
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_packus_epi32&expand=4126)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packus_epi32&expand=4126)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackusdw))]
@@ -4973,7 +4973,7 @@ pub unsafe fn _mm256_maskz_packus_epi32(k: __mmask16, a: __m256i, b: __m256i) ->
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_packus_epi32&expand=4122)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packus_epi32&expand=4122)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackusdw))]
@@ -4984,7 +4984,7 @@ pub unsafe fn _mm_mask_packus_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __
/// Convert packed signed 32-bit integers from a and b to packed 16-bit integers using unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_packus_epi32&expand=4123)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packus_epi32&expand=4123)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackusdw))]
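Unsigned saturation clamps at both ends: negative inputs go to 0 and oversized ones to the unsigned maximum. A sketch:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn packus32_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi32(-5);      // clamps to 0
    let b = _mm512_set1_epi32(100_000); // clamps to u16::MAX (65535)
    _mm512_packus_epi32(a, b)
}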
@@ -4996,7 +4996,7 @@ pub unsafe fn _mm_maskz_packus_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_packus_epi16&expand=4121)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_packus_epi16&expand=4121)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -5006,7 +5006,7 @@ pub unsafe fn _mm512_packus_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_packus_epi16&expand=4119)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_packus_epi16&expand=4119)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -5022,7 +5022,7 @@ pub unsafe fn _mm512_mask_packus_epi16(
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_packus_epi16&expand=4120)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_packus_epi16&expand=4120)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -5034,7 +5034,7 @@ pub unsafe fn _mm512_maskz_packus_epi16(k: __mmask64, a: __m512i, b: __m512i) ->
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_packus_epi16&expand=4116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_packus_epi16&expand=4116)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -5050,7 +5050,7 @@ pub unsafe fn _mm256_mask_packus_epi16(
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_packus_epi16&expand=4117)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_packus_epi16&expand=4117)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -5062,7 +5062,7 @@ pub unsafe fn _mm256_maskz_packus_epi16(k: __mmask32, a: __m256i, b: __m256i) ->
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_packus_epi16&expand=4113)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_packus_epi16&expand=4113)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackuswb))]
@@ -5073,7 +5073,7 @@ pub unsafe fn _mm_mask_packus_epi16(src: __m128i, k: __mmask16, a: __m128i, b: _
/// Convert packed signed 16-bit integers from a and b to packed 8-bit integers using unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_packus_epi16&expand=4114)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_packus_epi16&expand=4114)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpackuswb))]
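And the byte-sized counterpart (illustrative):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn packus16_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi16(-5);  // clamps to 0
    let b = _mm512_set1_epi16(300); // clamps to u8::MAX (255)
    _mm512_packus_epi16(a, b)
}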
@@ -5085,7 +5085,7 @@ pub unsafe fn _mm_maskz_packus_epi16(k: __mmask16, a: __m128i, b: __m128i) -> __
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_avg_epu16&expand=388)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_avg_epu16&expand=388)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -5095,7 +5095,7 @@ pub unsafe fn _mm512_avg_epu16(a: __m512i, b: __m512i) -> __m512i {
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_avg_epu16&expand=389)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_avg_epu16&expand=389)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -5106,7 +5106,7 @@ pub unsafe fn _mm512_mask_avg_epu16(src: __m512i, k: __mmask32, a: __m512i, b: _
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_avg_epu16&expand=390)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_avg_epu16&expand=390)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -5118,7 +5118,7 @@ pub unsafe fn _mm512_maskz_avg_epu16(k: __mmask32, a: __m512i, b: __m512i) -> __
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_avg_epu16&expand=386)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_avg_epu16&expand=386)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -5129,7 +5129,7 @@ pub unsafe fn _mm256_mask_avg_epu16(src: __m256i, k: __mmask16, a: __m256i, b: _
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_avg_epu16&expand=387)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_avg_epu16&expand=387)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -5141,7 +5141,7 @@ pub unsafe fn _mm256_maskz_avg_epu16(k: __mmask16, a: __m256i, b: __m256i) -> __
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_avg_epu16&expand=383)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_avg_epu16&expand=383)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgw))]
@@ -5152,7 +5152,7 @@ pub unsafe fn _mm_mask_avg_epu16(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Average packed unsigned 16-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_avg_epu16&expand=384)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_avg_epu16&expand=384)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgw))]
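The average rounds up, computing (a + b + 1) >> 1 without intermediate overflow; a sketch:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn avg16_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    // (1 + 2 + 1) >> 1 = 2 in every u16 lane.
    _mm512_avg_epu16(_mm512_set1_epi16(1), _mm512_set1_epi16(2))
}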
@@ -5164,7 +5164,7 @@ pub unsafe fn _mm_maskz_avg_epu16(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_avg_epu8&expand=397)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_avg_epu8&expand=397)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -5174,7 +5174,7 @@ pub unsafe fn _mm512_avg_epu8(a: __m512i, b: __m512i) -> __m512i {
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_avg_epu8&expand=398)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_avg_epu8&expand=398)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -5185,7 +5185,7 @@ pub unsafe fn _mm512_mask_avg_epu8(src: __m512i, k: __mmask64, a: __m512i, b: __
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_avg_epu8&expand=399)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_avg_epu8&expand=399)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -5197,7 +5197,7 @@ pub unsafe fn _mm512_maskz_avg_epu8(k: __mmask64, a: __m512i, b: __m512i) -> __m
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_avg_epu8&expand=395)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_avg_epu8&expand=395)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -5208,7 +5208,7 @@ pub unsafe fn _mm256_mask_avg_epu8(src: __m256i, k: __mmask32, a: __m256i, b: __
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_avg_epu8&expand=396)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_avg_epu8&expand=396)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -5220,7 +5220,7 @@ pub unsafe fn _mm256_maskz_avg_epu8(k: __mmask32, a: __m256i, b: __m256i) -> __m
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_avg_epu8&expand=392)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_avg_epu8&expand=392)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgb))]
@@ -5231,7 +5231,7 @@ pub unsafe fn _mm_mask_avg_epu8(src: __m128i, k: __mmask16, a: __m128i, b: __m12
/// Average packed unsigned 8-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_avg_epu8&expand=393)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_avg_epu8&expand=393)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpavgb))]
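A sketch of the byte variant combined with a writemask (illustrative name and values):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512bw")]
unsafe fn avg8_demo() -> core::arch::x86_64::__m512i {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi8(10);
    let b = _mm512_set1_epi8(20);
    // Low 32 byte lanes become 15; the high 32 keep src's zeros.
    _mm512_mask_avg_epu8(_mm512_setzero_si512(), 0x0000_0000_FFFF_FFFF, a, b)
}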
@@ -5243,7 +5243,7 @@ pub unsafe fn _mm_maskz_avg_epu8(k: __mmask16, a: __m128i, b: __m128i) -> __m128
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sll_epi16&expand=5271)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sll_epi16&expand=5271)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -5253,7 +5253,7 @@ pub unsafe fn _mm512_sll_epi16(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sll_epi16&expand=5269)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sll_epi16&expand=5269)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -5269,7 +5269,7 @@ pub unsafe fn _mm512_mask_sll_epi16(
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sll_epi16&expand=5270)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sll_epi16&expand=5270)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -5281,7 +5281,7 @@ pub unsafe fn _mm512_maskz_sll_epi16(k: __mmask32, a: __m512i, count: __m128i) -
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sll_epi16&expand=5266)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sll_epi16&expand=5266)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -5297,7 +5297,7 @@ pub unsafe fn _mm256_mask_sll_epi16(
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sll_epi16&expand=5267)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sll_epi16&expand=5267)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -5309,7 +5309,7 @@ pub unsafe fn _mm256_maskz_sll_epi16(k: __mmask16, a: __m256i, count: __m128i) -
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sll_epi16&expand=5263)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sll_epi16&expand=5263)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw))]
@@ -5320,7 +5320,7 @@ pub unsafe fn _mm_mask_sll_epi16(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 16-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sll_epi16&expand=5264)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sll_epi16&expand=5264)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw))]
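
A minimal usage sketch for the sll-by-count family touched above, assuming a toolchain where these AVX-512BW intrinsics are callable and a CPU reporting avx512bw at runtime: every 16-bit lane is shifted by the same count held in the low 64 bits of the second operand, and a count above 15 zeroes all lanes.

#[cfg(target_arch = "x86_64")]
fn demo_sll_epi16() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx512bw") {
        unsafe {
            let a = _mm512_set1_epi16(1);
            let count = _mm_set_epi64x(0, 3); // shift count lives in the low 64 bits
            let r = _mm512_sll_epi16(a, count); // every lane becomes 1 << 3 == 8
            let _ = r;
        }
    }
}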
@@ -5332,13 +5332,13 @@ pub unsafe fn _mm_maskz_sll_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_slli_epi16&expand=5301)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_slli_epi16&expand=5301)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_slli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let r = vpslliw(a, IMM8);
transmute(r)
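
The macro rename in this hunk, static_assert_imm_u8!(IMM8) to static_assert_uimm_bits!(IMM8, 8), generalizes the compile-time check to "the immediate fits in N unsigned bits". A hedged sketch of an equivalent check (not stdarch's actual macro), assuming a recent toolchain with inline const blocks that may reference generic parameters:

fn shl16<const IMM8: u32>(x: u16) -> u16 {
    // Post-monomorphization check: IMM8 must fit in 8 unsigned bits.
    const { assert!(IMM8 < (1 << 8)) };
    // Hardware zeroes lanes for shift counts >= 16; mirror that here.
    if IMM8 >= 16 { 0 } else { x << IMM8 }
}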
@@ -5346,7 +5346,7 @@ pub unsafe fn _mm512_slli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_slli_epi16&expand=5299)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_slli_epi16&expand=5299)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
@@ -5356,7 +5356,7 @@ pub unsafe fn _mm512_mask_slli_epi16<const IMM8: u32>(
k: __mmask32,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let shf = vpslliw(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i16x32()))
@@ -5364,13 +5364,13 @@ pub unsafe fn _mm512_mask_slli_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_slli_epi16&expand=5300)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_slli_epi16&expand=5300)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_slli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let shf = vpslliw(a, IMM8);
let zero = _mm512_setzero_si512().as_i16x32();
@@ -5379,7 +5379,7 @@ pub unsafe fn _mm512_maskz_slli_epi16<const IMM8: u32>(k: __mmask32, a: __m512i)
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_slli_epi16&expand=5296)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_slli_epi16&expand=5296)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
@@ -5389,7 +5389,7 @@ pub unsafe fn _mm256_mask_slli_epi16<const IMM8: u32>(
k: __mmask16,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliw256(a.as_i16x16(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i16x16()))
@@ -5397,13 +5397,13 @@ pub unsafe fn _mm256_mask_slli_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_slli_epi16&expand=5297)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_slli_epi16&expand=5297)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_slli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliw256(a.as_i16x16(), imm8);
let zero = _mm256_setzero_si256().as_i16x16();
@@ -5412,7 +5412,7 @@ pub unsafe fn _mm256_maskz_slli_epi16<const IMM8: u32>(k: __mmask16, a: __m256i)
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_slli_epi16&expand=5293)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_slli_epi16&expand=5293)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
@@ -5422,7 +5422,7 @@ pub unsafe fn _mm_mask_slli_epi16<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliw128(a.as_i16x8(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i16x8()))
@@ -5430,13 +5430,13 @@ pub unsafe fn _mm_mask_slli_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_slli_epi16&expand=5294)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_slli_epi16&expand=5294)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_slli_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliw128(a.as_i16x8(), imm8);
let zero = _mm_setzero_si128().as_i16x8();
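
For the writemask/zeromask variants in these hunks, simd_select_bitmask does the per-lane blend: bit i of k picks the shifted lane, otherwise the fallback lane (src for *_mask_*, zero for *_maskz_*). A scalar sketch of that selection, illustrative only and not stdarch code:

fn select_bitmask8(k: u8, if_set: [i16; 8], if_clear: [i16; 8]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for i in 0..8 {
        // Lane i comes from if_set when bit i of the mask is 1.
        out[i] = if (k >> i) & 1 == 1 { if_set[i] } else { if_clear[i] };
    }
    out
}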
@@ -5445,7 +5445,7 @@ pub unsafe fn _mm_maskz_slli_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sllv_epi16&expand=5333)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sllv_epi16&expand=5333)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5455,7 +5455,7 @@ pub unsafe fn _mm512_sllv_epi16(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sllv_epi16&expand=5331)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sllv_epi16&expand=5331)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5471,7 +5471,7 @@ pub unsafe fn _mm512_mask_sllv_epi16(
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sllv_epi16&expand=5332)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sllv_epi16&expand=5332)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5483,7 +5483,7 @@ pub unsafe fn _mm512_maskz_sllv_epi16(k: __mmask32, a: __m512i, count: __m512i)
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sllv_epi16&expand=5330)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sllv_epi16&expand=5330)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5493,7 +5493,7 @@ pub unsafe fn _mm256_sllv_epi16(a: __m256i, count: __m256i) -> __m256i {
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sllv_epi16&expand=5328)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sllv_epi16&expand=5328)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5509,7 +5509,7 @@ pub unsafe fn _mm256_mask_sllv_epi16(
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sllv_epi16&expand=5329)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sllv_epi16&expand=5329)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5521,7 +5521,7 @@ pub unsafe fn _mm256_maskz_sllv_epi16(k: __mmask16, a: __m256i, count: __m256i)
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sllv_epi16&expand=5327)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sllv_epi16&expand=5327)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5531,7 +5531,7 @@ pub unsafe fn _mm_sllv_epi16(a: __m128i, count: __m128i) -> __m128i {
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sllv_epi16&expand=5325)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sllv_epi16&expand=5325)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvw))]
@@ -5547,7 +5547,7 @@ pub unsafe fn _mm_mask_sllv_epi16(
/// Shift packed 16-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sllv_epi16&expand=5326)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sllv_epi16&expand=5326)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvw))]
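
Unlike the sll/slli families above, the sllv family shifts each lane by the count in the matching lane of count. A scalar reference model, illustrative only, with counts of 16 or more producing 0 as the hardware does:

fn sllv_epi16_ref<const N: usize>(a: [u16; N], count: [u16; N]) -> [u16; N] {
    let mut out = [0u16; N];
    for i in 0..N {
        // Per-lane count; oversized counts zero the lane.
        out[i] = if count[i] >= 16 { 0 } else { a[i] << count[i] };
    }
    out
}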
@@ -5559,7 +5559,7 @@ pub unsafe fn _mm_maskz_sllv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srl_epi16&expand=5483)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srl_epi16&expand=5483)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5569,7 +5569,7 @@ pub unsafe fn _mm512_srl_epi16(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srl_epi16&expand=5481)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srl_epi16&expand=5481)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5585,7 +5585,7 @@ pub unsafe fn _mm512_mask_srl_epi16(
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srl_epi16&expand=5482)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srl_epi16&expand=5482)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5597,7 +5597,7 @@ pub unsafe fn _mm512_maskz_srl_epi16(k: __mmask32, a: __m512i, count: __m128i) -
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srl_epi16&expand=5478)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srl_epi16&expand=5478)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5613,7 +5613,7 @@ pub unsafe fn _mm256_mask_srl_epi16(
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srl_epi16&expand=5479)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srl_epi16&expand=5479)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5625,7 +5625,7 @@ pub unsafe fn _mm256_maskz_srl_epi16(k: __mmask16, a: __m256i, count: __m128i) -
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srl_epi16&expand=5475)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srl_epi16&expand=5475)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5636,7 +5636,7 @@ pub unsafe fn _mm_mask_srl_epi16(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 16-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srl_epi16&expand=5476)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srl_epi16&expand=5476)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw))]
@@ -5648,13 +5648,13 @@ pub unsafe fn _mm_maskz_srl_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srli_epi16&expand=5513)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srli_epi16&expand=5513)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let r = vpsrliw(a, IMM8);
transmute(r)
@@ -5662,7 +5662,7 @@ pub unsafe fn _mm512_srli_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srli_epi16&expand=5511)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srli_epi16&expand=5511)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
@@ -5672,7 +5672,7 @@ pub unsafe fn _mm512_mask_srli_epi16<const IMM8: u32>(
k: __mmask32,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let shf = vpsrliw(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i16x32()))
@@ -5680,13 +5680,13 @@ pub unsafe fn _mm512_mask_srli_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srli_epi16&expand=5512)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srli_epi16&expand=5512)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srli_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
    // IMM8 should be u32; the Intel documentation appears to be incorrect here
let a = a.as_i16x32();
let shf = vpsrliw(a, IMM8 as u32);
@@ -5696,7 +5696,7 @@ pub unsafe fn _mm512_maskz_srli_epi16<const IMM8: i32>(k: __mmask32, a: __m512i)
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srli_epi16&expand=5508)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srli_epi16&expand=5508)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
@@ -5706,20 +5706,20 @@ pub unsafe fn _mm256_mask_srli_epi16<const IMM8: i32>(
k: __mmask16,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf = _mm256_srli_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, shf.as_i16x16(), src.as_i16x16()))
}
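
As the body above shows, the narrower masked srli variants are built from the unmasked public intrinsic plus a bitmask blend rather than a dedicated backend call. A scalar sketch of that mask-then-blend shape, illustrative only:

fn mask_srli_epi16_ref(src: [u16; 16], k: u16, a: [u16; 16], imm8: u32) -> [u16; 16] {
    let mut out = src; // writemask: unselected lanes keep src
    for i in 0..16 {
        if (k >> i) & 1 == 1 {
            out[i] = if imm8 >= 16 { 0 } else { a[i] >> imm8 };
        }
    }
    out
}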
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srli_epi16&expand=5509)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srli_epi16&expand=5509)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srli_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf = _mm256_srli_epi16::<IMM8>(a);
let zero = _mm256_setzero_si256().as_i16x16();
transmute(simd_select_bitmask(k, shf.as_i16x16(), zero))
@@ -5727,7 +5727,7 @@ pub unsafe fn _mm256_maskz_srli_epi16<const IMM8: i32>(k: __mmask16, a: __m256i)
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srli_epi16&expand=5505)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srli_epi16&expand=5505)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
@@ -5737,20 +5737,20 @@ pub unsafe fn _mm_mask_srli_epi16<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf = _mm_srli_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, shf.as_i16x8(), src.as_i16x8()))
}
/// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srli_epi16&expand=5506)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srli_epi16&expand=5506)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srli_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf = _mm_srli_epi16::<IMM8>(a);
let zero = _mm_setzero_si128().as_i16x8();
transmute(simd_select_bitmask(k, shf.as_i16x8(), zero))
@@ -5758,7 +5758,7 @@ pub unsafe fn _mm_maskz_srli_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) ->
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srlv_epi16&expand=5545)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srlv_epi16&expand=5545)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5768,7 +5768,7 @@ pub unsafe fn _mm512_srlv_epi16(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srlv_epi16&expand=5543)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srlv_epi16&expand=5543)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5784,7 +5784,7 @@ pub unsafe fn _mm512_mask_srlv_epi16(
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srlv_epi16&expand=5544)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srlv_epi16&expand=5544)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5796,7 +5796,7 @@ pub unsafe fn _mm512_maskz_srlv_epi16(k: __mmask32, a: __m512i, count: __m512i)
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srlv_epi16&expand=5542)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srlv_epi16&expand=5542)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5806,7 +5806,7 @@ pub unsafe fn _mm256_srlv_epi16(a: __m256i, count: __m256i) -> __m256i {
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srlv_epi16&expand=5540)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srlv_epi16&expand=5540)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5822,7 +5822,7 @@ pub unsafe fn _mm256_mask_srlv_epi16(
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srlv_epi16&expand=5541)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srlv_epi16&expand=5541)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5834,7 +5834,7 @@ pub unsafe fn _mm256_maskz_srlv_epi16(k: __mmask16, a: __m256i, count: __m256i)
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srlv_epi16&expand=5539)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srlv_epi16&expand=5539)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5844,7 +5844,7 @@ pub unsafe fn _mm_srlv_epi16(a: __m128i, count: __m128i) -> __m128i {
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srlv_epi16&expand=5537)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srlv_epi16&expand=5537)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5860,7 +5860,7 @@ pub unsafe fn _mm_mask_srlv_epi16(
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srlv_epi16&expand=5538)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srlv_epi16&expand=5538)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvw))]
@@ -5872,7 +5872,7 @@ pub unsafe fn _mm_maskz_srlv_epi16(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sra_epi16&expand=5398)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sra_epi16&expand=5398)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5882,7 +5882,7 @@ pub unsafe fn _mm512_sra_epi16(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sra_epi16&expand=5396)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sra_epi16&expand=5396)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5898,7 +5898,7 @@ pub unsafe fn _mm512_mask_sra_epi16(
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sra_epi16&expand=5397)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sra_epi16&expand=5397)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5910,7 +5910,7 @@ pub unsafe fn _mm512_maskz_sra_epi16(k: __mmask32, a: __m512i, count: __m128i) -
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sra_epi16&expand=5393)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sra_epi16&expand=5393)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5926,7 +5926,7 @@ pub unsafe fn _mm256_mask_sra_epi16(
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sra_epi16&expand=5394)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sra_epi16&expand=5394)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5938,7 +5938,7 @@ pub unsafe fn _mm256_maskz_sra_epi16(k: __mmask16, a: __m256i, count: __m128i) -
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sra_epi16&expand=5390)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sra_epi16&expand=5390)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5949,7 +5949,7 @@ pub unsafe fn _mm_mask_sra_epi16(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 16-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sra_epi16&expand=5391)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sra_epi16&expand=5391)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw))]
@@ -5961,13 +5961,13 @@ pub unsafe fn _mm_maskz_sra_epi16(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srai_epi16&expand=5427)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srai_epi16&expand=5427)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srai_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let r = vpsraiw(a, IMM8);
transmute(r)
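
The srai family shifts in copies of the sign bit, where the srli family above shifts in zeros. A tiny scalar contrast, illustrative only:

fn sra_vs_srl() {
    let x: i16 = -8;
    let arithmetic = x >> 2;                // -2: sign bit replicated (srai)
    let logical = ((x as u16) >> 2) as i16; // 16382: zeros shifted in (srli)
    assert_eq!(arithmetic, -2);
    assert_eq!(logical, 16382);
}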
@@ -5975,7 +5975,7 @@ pub unsafe fn _mm512_srai_epi16<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srai_epi16&expand=5425)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srai_epi16&expand=5425)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
@@ -5985,7 +5985,7 @@ pub unsafe fn _mm512_mask_srai_epi16<const IMM8: u32>(
k: __mmask32,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let shf = vpsraiw(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i16x32()))
@@ -5993,13 +5993,13 @@ pub unsafe fn _mm512_mask_srai_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srai_epi16&expand=5426)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srai_epi16&expand=5426)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srai_epi16<const IMM8: u32>(k: __mmask32, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
let shf = vpsraiw(a, IMM8);
let zero = _mm512_setzero_si512().as_i16x32();
@@ -6008,7 +6008,7 @@ pub unsafe fn _mm512_maskz_srai_epi16<const IMM8: u32>(k: __mmask32, a: __m512i)
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srai_epi16&expand=5422)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srai_epi16&expand=5422)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
@@ -6018,7 +6018,7 @@ pub unsafe fn _mm256_mask_srai_epi16<const IMM8: u32>(
k: __mmask16,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psraiw256(a.as_i16x16(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i16x16()))
@@ -6026,13 +6026,13 @@ pub unsafe fn _mm256_mask_srai_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srai_epi16&expand=5423)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srai_epi16&expand=5423)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srai_epi16<const IMM8: u32>(k: __mmask16, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psraiw256(a.as_i16x16(), imm8);
let zero = _mm256_setzero_si256().as_i16x16();
@@ -6041,7 +6041,7 @@ pub unsafe fn _mm256_maskz_srai_epi16<const IMM8: u32>(k: __mmask16, a: __m256i)
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srai_epi16&expand=5419)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srai_epi16&expand=5419)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
@@ -6051,7 +6051,7 @@ pub unsafe fn _mm_mask_srai_epi16<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psraiw128(a.as_i16x8(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i16x8()))
@@ -6059,13 +6059,13 @@ pub unsafe fn _mm_mask_srai_epi16<const IMM8: u32>(
/// Shift packed 16-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srai_epi16&expand=5420)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srai_epi16&expand=5420)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraw, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srai_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psraiw128(a.as_i16x8(), imm8);
let zero = _mm_setzero_si128().as_i16x8();
@@ -6074,7 +6074,7 @@ pub unsafe fn _mm_maskz_srai_epi16<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srav_epi16&expand=5456)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srav_epi16&expand=5456)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6084,7 +6084,7 @@ pub unsafe fn _mm512_srav_epi16(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srav_epi16&expand=5454)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srav_epi16&expand=5454)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6100,7 +6100,7 @@ pub unsafe fn _mm512_mask_srav_epi16(
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srav_epi16&expand=5455)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srav_epi16&expand=5455)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6112,7 +6112,7 @@ pub unsafe fn _mm512_maskz_srav_epi16(k: __mmask32, a: __m512i, count: __m512i)
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srav_epi16&expand=5453)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srav_epi16&expand=5453)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6122,7 +6122,7 @@ pub unsafe fn _mm256_srav_epi16(a: __m256i, count: __m256i) -> __m256i {
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srav_epi16&expand=5451)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srav_epi16&expand=5451)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6138,7 +6138,7 @@ pub unsafe fn _mm256_mask_srav_epi16(
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srav_epi16&expand=5452)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srav_epi16&expand=5452)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6150,7 +6150,7 @@ pub unsafe fn _mm256_maskz_srav_epi16(k: __mmask16, a: __m256i, count: __m256i)
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srav_epi16&expand=5450)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srav_epi16&expand=5450)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6160,7 +6160,7 @@ pub unsafe fn _mm_srav_epi16(a: __m128i, count: __m128i) -> __m128i {
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srav_epi16&expand=5448)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srav_epi16&expand=5448)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6176,7 +6176,7 @@ pub unsafe fn _mm_mask_srav_epi16(
/// Shift packed 16-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srav_epi16&expand=5449)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srav_epi16&expand=5449)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravw))]
@@ -6188,7 +6188,7 @@ pub unsafe fn _mm_maskz_srav_epi16(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_epi16&expand=4226)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi16&expand=4226)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w
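
permutex2var draws each output lane from the concatenation of a and b: for the 512-bit form, idx supplies 6 index bits per lane, the high one selecting b over a. A scalar reference model, illustrative only:

fn permutex2var_epi16_ref(a: [u16; 32], idx: [u16; 32], b: [u16; 32]) -> [u16; 32] {
    let mut out = [0u16; 32];
    for i in 0..32 {
        let sel = (idx[i] & 0x3F) as usize; // 6 index bits address 64 source lanes
        out[i] = if sel < 32 { a[sel] } else { b[sel - 32] };
    }
    out
}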
@@ -6198,7 +6198,7 @@ pub unsafe fn _mm512_permutex2var_epi16(a: __m512i, idx: __m512i, b: __m512i) ->
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex2var_epi16&expand=4223)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi16&expand=4223)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpermt2w))]
@@ -6214,7 +6214,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex2var_epi16&expand=4225)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi16&expand=4225)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w
@@ -6231,7 +6231,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask2_permutex2var_epi16&expand=4224)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi16&expand=4224)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpermi2w))]
@@ -6247,7 +6247,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_epi16&expand=4222)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi16&expand=4222)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w
@@ -6257,7 +6257,7 @@ pub unsafe fn _mm256_permutex2var_epi16(a: __m256i, idx: __m256i, b: __m256i) ->
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex2var_epi16&expand=4219)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi16&expand=4219)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2w))]
@@ -6273,7 +6273,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex2var_epi16&expand=4221)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi16&expand=4221)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w
@@ -6290,7 +6290,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask2_permutex2var_epi16&expand=4220)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi16&expand=4220)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2w))]
@@ -6306,7 +6306,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_epi16&expand=4218)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi16&expand=4218)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w
@@ -6316,7 +6316,7 @@ pub unsafe fn _mm_permutex2var_epi16(a: __m128i, idx: __m128i, b: __m128i) -> __
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutex2var_epi16&expand=4215)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi16&expand=4215)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2w))]
@@ -6332,7 +6332,7 @@ pub unsafe fn _mm_mask_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutex2var_epi16&expand=4217)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi16&expand=4217)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2w or vpermt2w
@@ -6349,7 +6349,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi16(
/// Shuffle 16-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask2_permutex2var_epi16&expand=4216)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi16&expand=4216)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2w))]
@@ -6365,7 +6365,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi16(
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_epi16&expand=4295)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi16&expand=4295)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6375,7 +6375,7 @@ pub unsafe fn _mm512_permutexvar_epi16(idx: __m512i, a: __m512i) -> __m512i {
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutexvar_epi16&expand=4293)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi16&expand=4293)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6391,7 +6391,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi16(
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutexvar_epi16&expand=4294)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi16&expand=4294)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6403,7 +6403,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi16(k: __mmask32, idx: __m512i, a: __m5
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_epi16&expand=4292)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi16&expand=4292)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6413,7 +6413,7 @@ pub unsafe fn _mm256_permutexvar_epi16(idx: __m256i, a: __m256i) -> __m256i {
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutexvar_epi16&expand=4290)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi16&expand=4290)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6429,7 +6429,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi16(
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutexvar_epi16&expand=4291)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi16&expand=4291)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6441,7 +6441,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi16(k: __mmask16, idx: __m256i, a: __m2
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_epi16&expand=4289)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutexvar_epi16&expand=4289)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6451,7 +6451,7 @@ pub unsafe fn _mm_permutexvar_epi16(idx: __m128i, a: __m128i) -> __m128i {
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutexvar_epi16&expand=4287)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutexvar_epi16&expand=4287)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermw))]
@@ -6467,7 +6467,7 @@ pub unsafe fn _mm_mask_permutexvar_epi16(
/// Shuffle 16-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutexvar_epi16&expand=4288)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutexvar_epi16&expand=4288)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpermw))]
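// Sketch only, not in the upstream change: reversing the eight words of a
// vector with _mm_permutexvar_epi16; idx lane i names the word of a to read.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn reverse_words(a: __m128i) -> __m128i {
    // lane 0 reads a[7], lane 1 reads a[6], ..., lane 7 reads a[0]
    let idx = _mm_set_epi16(0, 1, 2, 3, 4, 5, 6, 7);
    _mm_permutexvar_epi16(idx, a)
}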
@@ -6479,7 +6479,7 @@ pub unsafe fn _mm_maskz_permutexvar_epi16(k: __mmask8, idx: __m128i, a: __m128i)
/// Blend packed 16-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_epi16&expand=430)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi16&expand=430)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovdqu16))] // should be vpblendmw
@@ -6489,7 +6489,7 @@ pub unsafe fn _mm512_mask_blend_epi16(k: __mmask32, a: __m512i, b: __m512i) -> _
/// Blend packed 16-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_epi16&expand=429)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi16&expand=429)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu16))] // should be vpblendmw
@@ -6499,7 +6499,7 @@ pub unsafe fn _mm256_mask_blend_epi16(k: __mmask16, a: __m256i, b: __m256i) -> _
/// Blend packed 16-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_epi16&expand=427)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi16&expand=427)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu16))] // should be vpblendmw
@@ -6509,7 +6509,7 @@ pub unsafe fn _mm_mask_blend_epi16(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Blend packed 8-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_epi8&expand=441)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi8&expand=441)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovdqu8))] // should be vpblendmb
@@ -6519,7 +6519,7 @@ pub unsafe fn _mm512_mask_blend_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __
/// Blend packed 8-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_epi8&expand=440)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi8&expand=440)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu8))] // should be vpblendmb
@@ -6529,7 +6529,7 @@ pub unsafe fn _mm256_mask_blend_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __
/// Blend packed 8-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_epi8&expand=439)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi8&expand=439)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu8))] // should be vpblendmb
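// A per-lane model of the blend family, as a hedged sketch: lane i of the
// result takes b when mask bit i is set and a otherwise.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn select_bytes(k: __mmask16, a: __m128i, b: __m128i) -> __m128i {
    // scalar rule: dst[i] = if k & (1 << i) != 0 { b[i] } else { a[i] }
    _mm_mask_blend_epi8(k, a, b)
}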
@@ -6539,13 +6539,13 @@ pub unsafe fn _mm_mask_blend_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m12
/// Broadcast the low packed 16-bit integer from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastw_epi16&expand=587)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastw_epi16&expand=587)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
pub unsafe fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i {
let a = _mm512_castsi128_si512(a).as_i16x32();
- let ret: i16x32 = simd_shuffle32!(
+ let ret: i16x32 = simd_shuffle!(
a,
a,
[
@@ -6558,7 +6558,7 @@ pub unsafe fn _mm512_broadcastw_epi16(a: __m128i) -> __m512i {
/// Broadcast the low packed 16-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcastw_epi16&expand=588)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastw_epi16&expand=588)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -6569,7 +6569,7 @@ pub unsafe fn _mm512_mask_broadcastw_epi16(src: __m512i, k: __mmask32, a: __m128
/// Broadcast the low packed 16-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcastw_epi16&expand=589)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastw_epi16&expand=589)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -6581,7 +6581,7 @@ pub unsafe fn _mm512_maskz_broadcastw_epi16(k: __mmask32, a: __m128i) -> __m512i
/// Broadcast the low packed 16-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcastw_epi16&expand=585)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastw_epi16&expand=585)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -6592,7 +6592,7 @@ pub unsafe fn _mm256_mask_broadcastw_epi16(src: __m256i, k: __mmask16, a: __m128
/// Broadcast the low packed 16-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcastw_epi16&expand=586)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastw_epi16&expand=586)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -6604,7 +6604,7 @@ pub unsafe fn _mm256_maskz_broadcastw_epi16(k: __mmask16, a: __m128i) -> __m256i
/// Broadcast the low packed 16-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_broadcastw_epi16&expand=582)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastw_epi16&expand=582)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -6615,7 +6615,7 @@ pub unsafe fn _mm_mask_broadcastw_epi16(src: __m128i, k: __mmask8, a: __m128i) -
/// Broadcast the low packed 16-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_broadcastw_epi16&expand=583)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastw_epi16&expand=583)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
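// Sketch, not part of this diff: the masked broadcasts splat word 0 of a,
// with unselected lanes keeping src (mask form) or becoming zero (maskz form).
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn splat_even_lanes(src: __m128i, a: __m128i) -> __m128i {
    // even word lanes receive a[0]; odd word lanes keep src
    _mm_mask_broadcastw_epi16(src, 0b0101_0101, a)
}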
@@ -6627,13 +6627,13 @@ pub unsafe fn _mm_maskz_broadcastw_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Broadcast the low packed 8-bit integer from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastb_epi8&expand=536)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastb_epi8&expand=536)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
pub unsafe fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i {
let a = _mm512_castsi128_si512(a).as_i8x64();
- let ret: i8x64 = simd_shuffle64!(
+ let ret: i8x64 = simd_shuffle!(
a,
a,
[
@@ -6647,7 +6647,7 @@ pub unsafe fn _mm512_broadcastb_epi8(a: __m128i) -> __m512i {
/// Broadcast the low packed 8-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcastb_epi8&expand=537)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastb_epi8&expand=537)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -6658,7 +6658,7 @@ pub unsafe fn _mm512_mask_broadcastb_epi8(src: __m512i, k: __mmask64, a: __m128i
/// Broadcast the low packed 8-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcastb_epi8&expand=538)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastb_epi8&expand=538)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -6670,7 +6670,7 @@ pub unsafe fn _mm512_maskz_broadcastb_epi8(k: __mmask64, a: __m128i) -> __m512i
/// Broadcast the low packed 8-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcastb_epi8&expand=534)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastb_epi8&expand=534)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -6681,7 +6681,7 @@ pub unsafe fn _mm256_mask_broadcastb_epi8(src: __m256i, k: __mmask32, a: __m128i
/// Broadcast the low packed 8-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcastb_epi8&expand=535)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastb_epi8&expand=535)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -6693,7 +6693,7 @@ pub unsafe fn _mm256_maskz_broadcastb_epi8(k: __mmask32, a: __m128i) -> __m256i
/// Broadcast the low packed 8-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_broadcastb_epi8&expand=531)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastb_epi8&expand=531)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -6704,7 +6704,7 @@ pub unsafe fn _mm_mask_broadcastb_epi8(src: __m128i, k: __mmask16, a: __m128i) -
/// Broadcast the low packed 8-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_broadcastb_epi8&expand=532)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastb_epi8&expand=532)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -6716,7 +6716,7 @@ pub unsafe fn _mm_maskz_broadcastb_epi8(k: __mmask16, a: __m128i) -> __m128i {
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpackhi_epi16&expand=6012)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi16&expand=6012)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6724,7 +6724,7 @@ pub unsafe fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i {
let a = a.as_i16x32();
let b = b.as_i16x32();
#[rustfmt::skip]
- let r: i16x32 = simd_shuffle32!(
+ let r: i16x32 = simd_shuffle!(
a,
b,
[
@@ -6743,7 +6743,7 @@ pub unsafe fn _mm512_unpackhi_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpackhi_epi16&expand=6010)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi16&expand=6010)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6759,7 +6759,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi16(
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpackhi_epi16&expand=6011)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi16&expand=6011)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6771,7 +6771,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi16(k: __mmask32, a: __m512i, b: __m512i)
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpackhi_epi16&expand=6007)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi16&expand=6007)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6787,7 +6787,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi16(
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpackhi_epi16&expand=6008)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi16&expand=6008)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6799,7 +6799,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi16(k: __mmask16, a: __m256i, b: __m256i)
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpackhi_epi16&expand=6004)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi16&expand=6004)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6815,7 +6815,7 @@ pub unsafe fn _mm_mask_unpackhi_epi16(
/// Unpack and interleave 16-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpackhi_epi16&expand=6005)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi16&expand=6005)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhwd))]
@@ -6827,7 +6827,7 @@ pub unsafe fn _mm_maskz_unpackhi_epi16(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpackhi_epi8&expand=6039)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi8&expand=6039)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
@@ -6835,7 +6835,7 @@ pub unsafe fn _mm512_unpackhi_epi8(a: __m512i, b: __m512i) -> __m512i {
let a = a.as_i8x64();
let b = b.as_i8x64();
#[rustfmt::skip]
- let r: i8x64 = simd_shuffle64!(
+ let r: i8x64 = simd_shuffle!(
a,
b,
[
@@ -6862,7 +6862,7 @@ pub unsafe fn _mm512_unpackhi_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpackhi_epi8&expand=6037)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi8&expand=6037)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
@@ -6878,7 +6878,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi8(
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpackhi_epi8&expand=6038)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi8&expand=6038)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
@@ -6890,7 +6890,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi8(k: __mmask64, a: __m512i, b: __m512i) -
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpackhi_epi8&expand=6034)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi8&expand=6034)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
@@ -6906,7 +6906,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi8(
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpackhi_epi8&expand=6035)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi8&expand=6035)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
@@ -6918,7 +6918,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi8(k: __mmask32, a: __m256i, b: __m256i) -
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpackhi_epi8&expand=6031)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi8&expand=6031)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
@@ -6934,7 +6934,7 @@ pub unsafe fn _mm_mask_unpackhi_epi8(
/// Unpack and interleave 8-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpackhi_epi8&expand=6032)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi8&expand=6032)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhbw))]
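// Illustrative sketch: within each 128-bit lane, unpackhi_epi8 interleaves
// the high eight bytes of a and b as [a8, b8, a9, b9, ..., a15, b15], and the
// maskz form then zeroes result lanes whose mask bit is clear.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn high_byte_interleave(a: __m128i, b: __m128i) -> __m128i {
    // an all-ones mask keeps every interleaved byte, matching plain unpackhi
    _mm_maskz_unpackhi_epi8(0xffff, a, b)
}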
@@ -6946,7 +6946,7 @@ pub unsafe fn _mm_maskz_unpackhi_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpacklo_epi16&expand=6069)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi16&expand=6069)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -6954,7 +6954,7 @@ pub unsafe fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i {
let a = a.as_i16x32();
let b = b.as_i16x32();
#[rustfmt::skip]
- let r: i16x32 = simd_shuffle32!(
+ let r: i16x32 = simd_shuffle!(
a,
b,
[
@@ -6973,7 +6973,7 @@ pub unsafe fn _mm512_unpacklo_epi16(a: __m512i, b: __m512i) -> __m512i {
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpacklo_epi16&expand=6067)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi16&expand=6067)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -6989,7 +6989,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi16(
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpacklo_epi16&expand=6068)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi16&expand=6068)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -7001,7 +7001,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi16(k: __mmask32, a: __m512i, b: __m512i)
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpacklo_epi16&expand=6064)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi16&expand=6064)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -7017,7 +7017,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi16(
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpacklo_epi16&expand=6065)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi16&expand=6065)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -7029,7 +7029,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi16(k: __mmask16, a: __m256i, b: __m256i)
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpacklo_epi16&expand=6061)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi16&expand=6061)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -7045,7 +7045,7 @@ pub unsafe fn _mm_mask_unpacklo_epi16(
/// Unpack and interleave 16-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpacklo_epi16&expand=6062)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi16&expand=6062)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklwd))]
@@ -7057,7 +7057,7 @@ pub unsafe fn _mm_maskz_unpacklo_epi16(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpacklo_epi8&expand=6096)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi8&expand=6096)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
@@ -7065,7 +7065,7 @@ pub unsafe fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i {
let a = a.as_i8x64();
let b = b.as_i8x64();
#[rustfmt::skip]
- let r: i8x64 = simd_shuffle64!(
+ let r: i8x64 = simd_shuffle!(
a,
b,
[
@@ -7092,7 +7092,7 @@ pub unsafe fn _mm512_unpacklo_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpacklo_epi8&expand=6094)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi8&expand=6094)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
@@ -7108,7 +7108,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi8(
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpacklo_epi8&expand=6095)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi8&expand=6095)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
@@ -7120,7 +7120,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi8(k: __mmask64, a: __m512i, b: __m512i) -
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpacklo_epi8&expand=6091)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi8&expand=6091)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
@@ -7136,7 +7136,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi8(
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpacklo_epi8&expand=6092)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi8&expand=6092)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
@@ -7148,7 +7148,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi8(k: __mmask32, a: __m256i, b: __m256i) -
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpacklo_epi8&expand=6088)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi8&expand=6088)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
@@ -7164,7 +7164,7 @@ pub unsafe fn _mm_mask_unpacklo_epi8(
/// Unpack and interleave 8-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpacklo_epi8&expand=6089)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi8&expand=6089)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklbw))]
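// Companion sketch for the low-half form: unpacklo_epi8 interleaves the low
// eight bytes of each 128-bit lane as [a0, b0, a1, b1, ..., a7, b7]. Zeroing
// the odd result lanes is one way to widen a's low bytes to 16-bit words.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn zero_extend_low_bytes(a: __m128i, b: __m128i) -> __m128i {
    // keep lanes 0, 2, 4, ... (a's bytes); drop lanes 1, 3, 5, ... (b's bytes)
    _mm_maskz_unpacklo_epi8(0b0101_0101_0101_0101, a, b)
}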
@@ -7176,7 +7176,7 @@ pub unsafe fn _mm_maskz_unpacklo_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _
/// Move packed 16-bit integers from a into dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mov_epi16&expand=3795)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi16&expand=3795)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovdqu16))]
@@ -7187,7 +7187,7 @@ pub unsafe fn _mm512_mask_mov_epi16(src: __m512i, k: __mmask32, a: __m512i) -> _
/// Move packed 16-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mov_epi16&expand=3796)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi16&expand=3796)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovdqu16))]
@@ -7199,7 +7199,7 @@ pub unsafe fn _mm512_maskz_mov_epi16(k: __mmask32, a: __m512i) -> __m512i {
/// Move packed 16-bit integers from a into dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mov_epi16&expand=3793)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi16&expand=3793)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu16))]
@@ -7210,7 +7210,7 @@ pub unsafe fn _mm256_mask_mov_epi16(src: __m256i, k: __mmask16, a: __m256i) -> _
/// Move packed 16-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mov_epi16&expand=3794)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi16&expand=3794)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu16))]
@@ -7222,7 +7222,7 @@ pub unsafe fn _mm256_maskz_mov_epi16(k: __mmask16, a: __m256i) -> __m256i {
/// Move packed 16-bit integers from a into dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mov_epi16&expand=3791)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi16&expand=3791)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu16))]
@@ -7233,7 +7233,7 @@ pub unsafe fn _mm_mask_mov_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __m12
/// Move packed 16-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mov_epi16&expand=3792)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi16&expand=3792)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu16))]
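// Sketch, not upstream code: mask_mov is a pure lane select between src and a,
// useful as the merge step after an unmasked computation.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn merge_low_half(src: __m128i, a: __m128i) -> __m128i {
    // the low four words come from a; the high four words keep src
    _mm_mask_mov_epi16(src, 0b0000_1111, a)
}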
@@ -7245,7 +7245,7 @@ pub unsafe fn _mm_maskz_mov_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Move packed 8-bit integers from a into dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mov_epi8&expand=3813)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi8&expand=3813)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovdqu8))]
@@ -7256,7 +7256,7 @@ pub unsafe fn _mm512_mask_mov_epi8(src: __m512i, k: __mmask64, a: __m512i) -> __
/// Move packed 8-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mov_epi8&expand=3814)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi8&expand=3814)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vmovdqu8))]
@@ -7268,7 +7268,7 @@ pub unsafe fn _mm512_maskz_mov_epi8(k: __mmask64, a: __m512i) -> __m512i {
/// Move packed 8-bit integers from a into dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mov_epi8&expand=3811)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi8&expand=3811)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu8))]
@@ -7279,7 +7279,7 @@ pub unsafe fn _mm256_mask_mov_epi8(src: __m256i, k: __mmask32, a: __m256i) -> __
/// Move packed 8-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mov_epi8&expand=3812)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi8&expand=3812)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu8))]
@@ -7291,7 +7291,7 @@ pub unsafe fn _mm256_maskz_mov_epi8(k: __mmask32, a: __m256i) -> __m256i {
/// Move packed 8-bit integers from a into dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mov_epi8&expand=3809)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi8&expand=3809)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu8))]
@@ -7302,7 +7302,7 @@ pub unsafe fn _mm_mask_mov_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m12
/// Move packed 8-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mov_epi8&expand=3810)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi8&expand=3810)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqu8))]
@@ -7314,7 +7314,7 @@ pub unsafe fn _mm_maskz_mov_epi8(k: __mmask16, a: __m128i) -> __m128i {
/// Broadcast 16-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_set1_epi16&expand=4942)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi16&expand=4942)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -7325,7 +7325,7 @@ pub unsafe fn _mm512_mask_set1_epi16(src: __m512i, k: __mmask32, a: i16) -> __m5
/// Broadcast 16-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_set1_epi16&expand=4943)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi16&expand=4943)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -7337,7 +7337,7 @@ pub unsafe fn _mm512_maskz_set1_epi16(k: __mmask32, a: i16) -> __m512i {
/// Broadcast 16-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_set1_epi16&expand=4939)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi16&expand=4939)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -7348,7 +7348,7 @@ pub unsafe fn _mm256_mask_set1_epi16(src: __m256i, k: __mmask16, a: i16) -> __m2
/// Broadcast 16-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_set1_epi16&expand=4940)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi16&expand=4940)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -7360,7 +7360,7 @@ pub unsafe fn _mm256_maskz_set1_epi16(k: __mmask16, a: i16) -> __m256i {
/// Broadcast 16-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_set1_epi16&expand=4936)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi16&expand=4936)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -7371,7 +7371,7 @@ pub unsafe fn _mm_mask_set1_epi16(src: __m128i, k: __mmask8, a: i16) -> __m128i
/// Broadcast 16-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_set1_epi16&expand=4937)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi16&expand=4937)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastw))]
@@ -7383,7 +7383,7 @@ pub unsafe fn _mm_maskz_set1_epi16(k: __mmask8, a: i16) -> __m128i {
/// Broadcast 8-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_set1_epi8&expand=4970)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi8&expand=4970)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -7394,7 +7394,7 @@ pub unsafe fn _mm512_mask_set1_epi8(src: __m512i, k: __mmask64, a: i8) -> __m512
/// Broadcast 8-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_set1_epi8&expand=4971)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi8&expand=4971)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -7406,7 +7406,7 @@ pub unsafe fn _mm512_maskz_set1_epi8(k: __mmask64, a: i8) -> __m512i {
/// Broadcast 8-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_set1_epi8&expand=4967)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi8&expand=4967)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -7417,7 +7417,7 @@ pub unsafe fn _mm256_mask_set1_epi8(src: __m256i, k: __mmask32, a: i8) -> __m256
/// Broadcast 8-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_set1_epi8&expand=4968)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi8&expand=4968)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -7429,7 +7429,7 @@ pub unsafe fn _mm256_maskz_set1_epi8(k: __mmask32, a: i8) -> __m256i {
/// Broadcast 8-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_set1_epi8&expand=4964)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi8&expand=4964)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
@@ -7440,7 +7440,7 @@ pub unsafe fn _mm_mask_set1_epi8(src: __m128i, k: __mmask16, a: i8) -> __m128i {
/// Broadcast 8-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_set1_epi8&expand=4965)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi8&expand=4965)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastb))]
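// Hedged sketch with made-up values: masked set1 broadcasts a scalar into the
// selected lanes only, which is handy for patching constants into a vector.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn patch_first_byte(src: __m128i) -> __m128i {
    // byte lane 0 becomes 0x7f; the other fifteen bytes keep src
    _mm_mask_set1_epi8(src, 0b1, 0x7f)
}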
@@ -7452,18 +7452,18 @@ pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i {
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shufflelo_epi16&expand=5221)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shufflelo_epi16&expand=5221)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_shufflelo_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
- let r: i16x32 = simd_shuffle32!(
+ let r: i16x32 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
IMM8 as u32 & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -7503,7 +7503,7 @@ pub unsafe fn _mm512_shufflelo_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shufflelo_epi16&expand=5219)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shufflelo_epi16&expand=5219)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))]
@@ -7513,20 +7513,20 @@ pub unsafe fn _mm512_mask_shufflelo_epi16<const IMM8: i32>(
k: __mmask32,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_shufflelo_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32()))
}
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shufflelo_epi16&expand=5220)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shufflelo_epi16&expand=5220)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_shufflelo_epi16::<IMM8>(a);
let zero = _mm512_setzero_si512().as_i16x32();
transmute(simd_select_bitmask(k, r.as_i16x32(), zero))
@@ -7534,7 +7534,7 @@ pub unsafe fn _mm512_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask32, a: __m
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shufflelo_epi16&expand=5216)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shufflelo_epi16&expand=5216)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
@@ -7544,20 +7544,20 @@ pub unsafe fn _mm256_mask_shufflelo_epi16<const IMM8: i32>(
k: __mmask16,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm256_shufflelo_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16()))
}
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflelo_epi16&expand=5217)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shufflelo_epi16&expand=5217)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm256_shufflelo_epi16::<IMM8>(a);
let zero = _mm256_setzero_si256().as_i16x16();
transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero))
@@ -7565,7 +7565,7 @@ pub unsafe fn _mm256_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask16, a: __m
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shufflelo_epi16&expand=5213)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shufflelo_epi16&expand=5213)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
@@ -7575,20 +7575,20 @@ pub unsafe fn _mm_mask_shufflelo_epi16<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm_shufflelo_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8()))
}
/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shufflelo_epi16&expand=5214)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shufflelo_epi16&expand=5214)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshuflw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm_shufflelo_epi16::<IMM8>(a);
let zero = _mm_setzero_si128().as_i16x8();
transmute(simd_select_bitmask(k, shuffle.as_i16x8(), zero))
@@ -7596,18 +7596,18 @@ pub unsafe fn _mm_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask8, a: __m128i
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shufflehi_epi16&expand=5212)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shufflehi_epi16&expand=5212)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_shufflehi_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x32();
- let r: i16x32 = simd_shuffle32!(
+ let r: i16x32 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
0,
1,
2,
@@ -7647,7 +7647,7 @@ pub unsafe fn _mm512_shufflehi_epi16<const IMM8: i32>(a: __m512i) -> __m512i {
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shufflehi_epi16&expand=5210)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shufflehi_epi16&expand=5210)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))]
@@ -7657,20 +7657,20 @@ pub unsafe fn _mm512_mask_shufflehi_epi16<const IMM8: i32>(
k: __mmask32,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_shufflehi_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32()))
}
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shufflehi_epi16&expand=5211)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shufflehi_epi16&expand=5211)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask32, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_shufflehi_epi16::<IMM8>(a);
let zero = _mm512_setzero_si512().as_i16x32();
transmute(simd_select_bitmask(k, r.as_i16x32(), zero))
@@ -7678,7 +7678,7 @@ pub unsafe fn _mm512_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask32, a: __m
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shufflehi_epi16&expand=5207)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shufflehi_epi16&expand=5207)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
@@ -7688,20 +7688,20 @@ pub unsafe fn _mm256_mask_shufflehi_epi16<const IMM8: i32>(
k: __mmask16,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm256_shufflehi_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16()))
}
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflehi_epi16&expand=5208)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shufflehi_epi16&expand=5208)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask16, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm256_shufflehi_epi16::<IMM8>(a);
let zero = _mm256_setzero_si256().as_i16x16();
transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero))
@@ -7709,7 +7709,7 @@ pub unsafe fn _mm256_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask16, a: __m
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shufflehi_epi16&expand=5204)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shufflehi_epi16&expand=5204)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
@@ -7719,20 +7719,20 @@ pub unsafe fn _mm_mask_shufflehi_epi16<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm_shufflehi_epi16::<IMM8>(a);
transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8()))
}
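// A small sketch of the 128-bit writemask variant (hypothetical demo fn;
// assumes avx512bw and avx512vl are available at runtime):
#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn shufflehi_writemask_demo() {
    use std::arch::x86_64::*;
    let src = _mm_set1_epi16(1);
    let a = _mm_set1_epi16(5);
    // An all-zero mask keeps every lane of `src`.
    let r = _mm_mask_shufflehi_epi16::<0b11_10_01_00>(src, 0, a);
    assert_eq!(_mm_cmpeq_epi16_mask(r, src), 0xFF);
}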
/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shufflehi_epi16&expand=5205)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shufflehi_epi16&expand=5205)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufhw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shuffle = _mm_shufflehi_epi16::<IMM8>(a);
let zero = _mm_setzero_si128().as_i16x8();
transmute(simd_select_bitmask(k, shuffle.as_i16x8(), zero))
@@ -7740,7 +7740,7 @@ pub unsafe fn _mm_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask8, a: __m128i
/// Shuffle packed 8-bit integers in a according to shuffle control mask in the corresponding 8-bit element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_epi8&expand=5159)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_epi8&expand=5159)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7750,7 +7750,7 @@ pub unsafe fn _mm512_shuffle_epi8(a: __m512i, b: __m512i) -> __m512i {
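// A sketch of the plain byte shuffle (hypothetical demo fn, AVX-512BW
// assumed): all-zero control bytes select byte 0 of each 128-bit lane, so a
// splatted input comes back unchanged.
#[target_feature(enable = "avx512bw")]
unsafe fn shuffle_epi8_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi8(3);
    let r = _mm512_shuffle_epi8(a, _mm512_setzero_si512());
    assert_eq!(_mm512_cmpeq_epi8_mask(r, a), u64::MAX);
}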
/// Shuffle 8-bit integers in a within 128-bit lanes using the control in the corresponding 8-bit element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_epi8&expand=5157)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_epi8&expand=5157)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7766,7 +7766,7 @@ pub unsafe fn _mm512_mask_shuffle_epi8(
/// Shuffle packed 8-bit integers in a according to shuffle control mask in the corresponding 8-bit element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_epi8&expand=5158)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_epi8&expand=5158)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7778,7 +7778,7 @@ pub unsafe fn _mm512_maskz_shuffle_epi8(k: __mmask64, a: __m512i, b: __m512i) ->
/// Shuffle 8-bit integers in a within 128-bit lanes using the control in the corresponding 8-bit element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_epi8&expand=5154)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_epi8&expand=5154)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7794,7 +7794,7 @@ pub unsafe fn _mm256_mask_shuffle_epi8(
/// Shuffle packed 8-bit integers in a according to shuffle control mask in the corresponding 8-bit element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_epi8&expand=5155)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_epi8&expand=5155)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7806,7 +7806,7 @@ pub unsafe fn _mm256_maskz_shuffle_epi8(k: __mmask32, a: __m256i, b: __m256i) ->
/// Shuffle 8-bit integers in a within 128-bit lanes using the control in the corresponding 8-bit element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shuffle_epi8&expand=5151)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_epi8&expand=5151)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7817,7 +7817,7 @@ pub unsafe fn _mm_mask_shuffle_epi8(src: __m128i, k: __mmask16, a: __m128i, b: _
/// Shuffle packed 8-bit integers in a according to shuffle control mask in the corresponding 8-bit element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shuffle_epi8&expand=5152)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_epi8&expand=5152)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufb))]
@@ -7829,7 +7829,7 @@ pub unsafe fn _mm_maskz_shuffle_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __
/// Compute the bitwise AND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_test_epi16_mask&expand=5884)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi16_mask&expand=5884)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestmw))]
@@ -7841,7 +7841,7 @@ pub unsafe fn _mm512_test_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
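// A sketch of the AND-test (hypothetical demo fn, AVX-512BW assumed): a mask
// bit is set exactly when a lane's bitwise AND is non-zero.
#[target_feature(enable = "avx512bw")]
unsafe fn test_epi16_mask_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi16(0b0101);
    // Overlapping bits: every lane tests non-zero.
    assert_eq!(_mm512_test_epi16_mask(a, _mm512_set1_epi16(0b0100)), u32::MAX);
    // Disjoint bits: every lane tests zero.
    assert_eq!(_mm512_test_epi16_mask(a, _mm512_set1_epi16(0b1010)), 0);
}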
/// Compute the bitwise AND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_test_epi16_mask&expand=5883)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi16_mask&expand=5883)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestmw))]
@@ -7853,7 +7853,7 @@ pub unsafe fn _mm512_mask_test_epi16_mask(k: __mmask32, a: __m512i, b: __m512i)
/// Compute the bitwise AND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_test_epi16_mask&expand=5882)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi16_mask&expand=5882)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmw))]
@@ -7865,7 +7865,7 @@ pub unsafe fn _mm256_test_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compute the bitwise AND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_test_epi16_mask&expand=5881)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi16_mask&expand=5881)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmw))]
@@ -7877,7 +7877,7 @@ pub unsafe fn _mm256_mask_test_epi16_mask(k: __mmask16, a: __m256i, b: __m256i)
/// Compute the bitwise AND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_epi16_mask&expand=5880)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi16_mask&expand=5880)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmw))]
@@ -7889,7 +7889,7 @@ pub unsafe fn _mm_test_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compute the bitwise AND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_test_epi16_mask&expand=5879)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi16_mask&expand=5879)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmw))]
@@ -7901,7 +7901,7 @@ pub unsafe fn _mm_mask_test_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Compute the bitwise AND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_test_epi8_mask&expand=5902)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi8_mask&expand=5902)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestmb))]
@@ -7913,7 +7913,7 @@ pub unsafe fn _mm512_test_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compute the bitwise AND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_test_epi8_mask&expand=5901)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi8_mask&expand=5901)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestmb))]
@@ -7925,7 +7925,7 @@ pub unsafe fn _mm512_mask_test_epi8_mask(k: __mmask64, a: __m512i, b: __m512i) -
/// Compute the bitwise AND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_test_epi8_mask&expand=5900)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi8_mask&expand=5900)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmb))]
@@ -7937,7 +7937,7 @@ pub unsafe fn _mm256_test_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compute the bitwise AND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_test_epi8_mask&expand=5899)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi8_mask&expand=5899)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmb))]
@@ -7949,7 +7949,7 @@ pub unsafe fn _mm256_mask_test_epi8_mask(k: __mmask32, a: __m256i, b: __m256i) -
/// Compute the bitwise AND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_epi8_mask&expand=5898)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi8_mask&expand=5898)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmb))]
@@ -7961,7 +7961,7 @@ pub unsafe fn _mm_test_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compute the bitwise AND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_test_epi8_mask&expand=5897)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi8_mask&expand=5897)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmb))]
@@ -7973,7 +7973,7 @@ pub unsafe fn _mm_mask_test_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) -> _
/// Compute the bitwise NAND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_testn_epi16_mask&expand=5915)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi16_mask&expand=5915)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestnmw))]
@@ -7985,7 +7985,7 @@ pub unsafe fn _mm512_testn_epi16_mask(a: __m512i, b: __m512i) -> __mmask32 {
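// The NAND-test is the complement of the AND-test above (hypothetical demo
// fn, AVX-512BW assumed): a mask bit is set when a lane's AND is zero.
#[target_feature(enable = "avx512bw")]
unsafe fn testn_epi16_mask_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi16(0b0101);
    assert_eq!(_mm512_testn_epi16_mask(a, _mm512_set1_epi16(0b1010)), u32::MAX);
    assert_eq!(_mm512_testn_epi16_mask(a, a), 0);
}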
/// Compute the bitwise NAND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_testn_epi16&expand=5914)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi16_mask&expand=5914)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestnmw))]
@@ -7997,7 +7997,7 @@ pub unsafe fn _mm512_mask_testn_epi16_mask(k: __mmask32, a: __m512i, b: __m512i)
/// Compute the bitwise NAND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testn_epi16_mask&expand=5913)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi16_mask&expand=5913)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmw))]
@@ -8009,7 +8009,7 @@ pub unsafe fn _mm256_testn_epi16_mask(a: __m256i, b: __m256i) -> __mmask16 {
/// Compute the bitwise NAND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_testn_epi16_mask&expand=5912)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi16_mask&expand=5912)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmw))]
@@ -8021,7 +8021,7 @@ pub unsafe fn _mm256_mask_testn_epi16_mask(k: __mmask16, a: __m256i, b: __m256i)
/// Compute the bitwise NAND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testn_epi16_mask&expand=5911)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi16_mask&expand=5911)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmw))]
@@ -8033,7 +8033,7 @@ pub unsafe fn _mm_testn_epi16_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compute the bitwise NAND of packed 16-bit integers in a and b, producing intermediate 16-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_testn_epi16_mask&expand=5910)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi16_mask&expand=5910)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmw))]
@@ -8045,7 +8045,7 @@ pub unsafe fn _mm_mask_testn_epi16_mask(k: __mmask8, a: __m128i, b: __m128i) ->
/// Compute the bitwise NAND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_testn_epi8_mask&expand=5933)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi8_mask&expand=5933)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestnmb))]
@@ -8057,7 +8057,7 @@ pub unsafe fn _mm512_testn_epi8_mask(a: __m512i, b: __m512i) -> __mmask64 {
/// Compute the bitwise NAND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_testn_epi8_mask&expand=5932)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi8_mask&expand=5932)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vptestnmb))]
@@ -8069,7 +8069,7 @@ pub unsafe fn _mm512_mask_testn_epi8_mask(k: __mmask64, a: __m512i, b: __m512i)
/// Compute the bitwise NAND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testn_epi8_mask&expand=5931)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi8_mask&expand=5931)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmb))]
@@ -8081,7 +8081,7 @@ pub unsafe fn _mm256_testn_epi8_mask(a: __m256i, b: __m256i) -> __mmask32 {
/// Compute the bitwise NAND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_testn_epi8_mask&expand=5930)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi8_mask&expand=5930)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmb))]
@@ -8093,7 +8093,7 @@ pub unsafe fn _mm256_mask_testn_epi8_mask(k: __mmask32, a: __m256i, b: __m256i)
/// Compute the bitwise NAND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testn_epi8_mask&expand=5929)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi8_mask&expand=5929)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmb))]
@@ -8105,7 +8105,7 @@ pub unsafe fn _mm_testn_epi8_mask(a: __m128i, b: __m128i) -> __mmask16 {
/// Compute the bitwise NAND of packed 8-bit integers in a and b, producing intermediate 8-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_testn_epi8_mask&expand=5928)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi8_mask&expand=5928)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmb))]
@@ -8117,7 +8117,7 @@ pub unsafe fn _mm_mask_testn_epi8_mask(k: __mmask16, a: __m128i, b: __m128i) ->
/// Store 64-bit mask from a into memory.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_store_mask64&expand=5578)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_store_mask64&expand=5578)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(mov))] //should be kmovq
@@ -8127,7 +8127,7 @@ pub unsafe fn _store_mask64(mem_addr: *mut u64, a: __mmask64) {
/// Store 32-bit mask from a into memory.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_store_mask32&expand=5577)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_store_mask32&expand=5577)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(mov))] //should be kmovd
@@ -8137,7 +8137,7 @@ pub unsafe fn _store_mask32(mem_addr: *mut u32, a: __mmask32) {
/// Load 64-bit mask from memory into k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_load_mask64&expand=3318)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_load_mask64&expand=3318)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(mov))] //should be kmovq
@@ -8147,7 +8147,7 @@ pub unsafe fn _load_mask64(mem_addr: *const u64) -> __mmask64 {
/// Load 32-bit mask from memory into k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_load_mask32&expand=3317)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_load_mask32&expand=3317)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(mov))] //should be kmovd
@@ -8157,7 +8157,7 @@ pub unsafe fn _load_mask32(mem_addr: *const u32) -> __mmask32 {
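// The store/load pair round-trips a mask register through plain memory; a
// sketch for the 64-bit pair (hypothetical demo fn, the 32-bit pair is
// analogous):
#[target_feature(enable = "avx512bw")]
unsafe fn mask_store_load_demo() {
    use std::arch::x86_64::*;
    let k: __mmask64 = 0xDEAD_BEEF_DEAD_BEEF;
    let mut mem = 0u64;
    _store_mask64(&mut mem, k);
    assert_eq!(_load_mask64(&mem), k);
}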
/// Compute the absolute differences of packed unsigned 8-bit integers in a and b, then horizontally sum each consecutive 8 differences to produce eight unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sad_epu8&expand=4855)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sad_epu8&expand=4855)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsadbw))]
@@ -8167,13 +8167,13 @@ pub unsafe fn _mm512_sad_epu8(a: __m512i, b: __m512i) -> __m512i {
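// A sketch of the plain SAD (hypothetical demo fn, AVX-512BW assumed): with
// one operand zero, each 64-bit result is just the sum of eight byte values.
#[target_feature(enable = "avx512bw")]
unsafe fn sad_epu8_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi8(3);
    let r = _mm512_sad_epu8(a, _mm512_setzero_si512());
    // Eight |3 - 0| differences per 64-bit element sum to 24.
    assert_eq!(_mm512_cmpeq_epi64_mask(r, _mm512_set1_epi64(24)), 0xFF);
}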
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst. Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_dbsad_epu8&expand=2114)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dbsad_epu8&expand=2114)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
pub unsafe fn _mm512_dbsad_epu8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x64();
let b = b.as_u8x64();
let r = vdbpsadbw(a, b, IMM8);
@@ -8182,7 +8182,7 @@ pub unsafe fn _mm512_dbsad_epu8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m5
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_dbsad_epu8&expand=2115)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dbsad_epu8&expand=2115)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(4)]
@@ -8193,7 +8193,7 @@ pub unsafe fn _mm512_mask_dbsad_epu8<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x64();
let b = b.as_u8x64();
let r = vdbpsadbw(a, b, IMM8);
@@ -8202,7 +8202,7 @@ pub unsafe fn _mm512_mask_dbsad_epu8<const IMM8: i32>(
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_dbsad_epu8&expand=2116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dbsad_epu8&expand=2116)
#[inline]
#[target_feature(enable = "avx512bw")]
#[rustc_legacy_const_generics(3)]
@@ -8212,7 +8212,7 @@ pub unsafe fn _mm512_maskz_dbsad_epu8<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x64();
let b = b.as_u8x64();
let r = vdbpsadbw(a, b, IMM8);
@@ -8225,13 +8225,13 @@ pub unsafe fn _mm512_maskz_dbsad_epu8<const IMM8: i32>(
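// One property that holds for any IMM8 (hypothetical demo fn, AVX-512BW
// assumed): identical splatted inputs give all-zero SADs, whichever
// quadruplets the control selects.
#[target_feature(enable = "avx512bw")]
unsafe fn dbsad_epu8_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi8(7);
    let r = _mm512_dbsad_epu8::<0b01_00_11_10>(a, a);
    assert_eq!(_mm512_cmpeq_epi16_mask(r, _mm512_setzero_si512()), u32::MAX);
}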
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst. Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dbsad_epu8&expand=2111)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dbsad_epu8&expand=2111)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
pub unsafe fn _mm256_dbsad_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x32();
let b = b.as_u8x32();
let r = vdbpsadbw256(a, b, IMM8);
@@ -8240,7 +8240,7 @@ pub unsafe fn _mm256_dbsad_epu8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m2
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_dbsad_epu8&expand=2112)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dbsad_epu8&expand=2112)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(4)]
@@ -8251,7 +8251,7 @@ pub unsafe fn _mm256_mask_dbsad_epu8<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x32();
let b = b.as_u8x32();
let r = vdbpsadbw256(a, b, IMM8);
@@ -8260,7 +8260,7 @@ pub unsafe fn _mm256_mask_dbsad_epu8<const IMM8: i32>(
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_dbsad_epu8&expand=2113)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dbsad_epu8&expand=2113)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -8270,7 +8270,7 @@ pub unsafe fn _mm256_maskz_dbsad_epu8<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x32();
let b = b.as_u8x32();
let r = vdbpsadbw256(a, b, IMM8);
@@ -8283,13 +8283,13 @@ pub unsafe fn _mm256_maskz_dbsad_epu8<const IMM8: i32>(
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst. Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dbsad_epu8&expand=2108)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dbsad_epu8&expand=2108)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vdbpsadbw, IMM8 = 0))]
pub unsafe fn _mm_dbsad_epu8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x16();
let b = b.as_u8x16();
let r = vdbpsadbw128(a, b, IMM8);
@@ -8298,7 +8298,7 @@ pub unsafe fn _mm_dbsad_epu8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_dbsad_epu8&expand=2109)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dbsad_epu8&expand=2109)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(4)]
@@ -8309,7 +8309,7 @@ pub unsafe fn _mm_mask_dbsad_epu8<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x16();
let b = b.as_u8x16();
let r = vdbpsadbw128(a, b, IMM8);
@@ -8318,7 +8318,7 @@ pub unsafe fn _mm_mask_dbsad_epu8<const IMM8: i32>(
/// Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in a compared to those in b, and store the 16-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from a, and the last two SADs use the upper 8-bit quadruplet of the lane from a. Quadruplets from b are selected from within 128-bit lanes according to the control in imm8, and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_dbsad_epu8&expand=2110)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dbsad_epu8&expand=2110)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -8328,7 +8328,7 @@ pub unsafe fn _mm_maskz_dbsad_epu8<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_u8x16();
let b = b.as_u8x16();
let r = vdbpsadbw128(a, b, IMM8);
@@ -8337,7 +8337,7 @@ pub unsafe fn _mm_maskz_dbsad_epu8<const IMM8: i32>(
/// Set each bit of mask register k based on the most significant bit of the corresponding packed 16-bit integer in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_movepi16_mask&expand=3873)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movepi16_mask&expand=3873)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovw2m))]
@@ -8349,7 +8349,7 @@ pub unsafe fn _mm512_movepi16_mask(a: __m512i) -> __mmask32 {
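// A sketch of the sign-bit extraction (hypothetical demo fn, AVX-512BW
// assumed): negative lanes set their mask bit, non-negative lanes clear it.
#[target_feature(enable = "avx512bw")]
unsafe fn movepi16_mask_demo() {
    use std::arch::x86_64::*;
    assert_eq!(_mm512_movepi16_mask(_mm512_set1_epi16(-1)), u32::MAX);
    assert_eq!(_mm512_movepi16_mask(_mm512_set1_epi16(1)), 0);
}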
/// Set each bit of mask register k based on the most significant bit of the corresponding packed 16-bit integer in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movepi16_mask&expand=3872)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movepi16_mask&expand=3872)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovw2m))]
@@ -8361,7 +8361,7 @@ pub unsafe fn _mm256_movepi16_mask(a: __m256i) -> __mmask16 {
/// Set each bit of mask register k based on the most significant bit of the corresponding packed 16-bit integer in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movepi16_mask&expand=3871)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi16_mask&expand=3871)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovw2m))]
@@ -8373,7 +8373,7 @@ pub unsafe fn _mm_movepi16_mask(a: __m128i) -> __mmask8 {
/// Set each bit of mask register k based on the most significant bit of the corresponding packed 8-bit integer in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_movepi8_mask&expand=3883)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movepi8_mask&expand=3883)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovb2m))]
@@ -8385,7 +8385,7 @@ pub unsafe fn _mm512_movepi8_mask(a: __m512i) -> __mmask64 {
/// Set each bit of mask register k based on the most significant bit of the corresponding packed 8-bit integer in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movepi8_mask&expand=3882)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movepi8_mask&expand=3882)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes fewer cycles than
@@ -8398,7 +8398,7 @@ pub unsafe fn _mm256_movepi8_mask(a: __m256i) -> __mmask32 {
/// Set each bit of mask register k based on the most significant bit of the corresponding packed 8-bit integer in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movepi8_mask&expand=3881)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi8_mask&expand=3881)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovmskb))] // should be vpmovb2m but compiled to vpmovmskb in the test shim because that takes fewer cycles than
@@ -8411,7 +8411,7 @@ pub unsafe fn _mm_movepi8_mask(a: __m128i) -> __mmask16 {
/// Set each packed 16-bit integer in dst to all ones or all zeros based on the value of the corresponding bit in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_movm_epi16&expand=3886)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movm_epi16&expand=3886)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovm2w))]
@@ -8441,7 +8441,7 @@ pub unsafe fn _mm512_movm_epi16(k: __mmask32) -> __m512i {
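// The inverse direction: a mask expands to all-ones or all-zero lanes
// (hypothetical demo fn, AVX-512BW assumed).
#[target_feature(enable = "avx512bw")]
unsafe fn movm_epi16_demo() {
    use std::arch::x86_64::*;
    let r = _mm512_movm_epi16(u32::MAX);
    assert_eq!(_mm512_cmpeq_epi16_mask(r, _mm512_set1_epi16(-1)), u32::MAX);
}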
/// Set each packed 16-bit integer in dst to all ones or all zeros based on the value of the corresponding bit in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movm_epi16&expand=3885)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movm_epi16&expand=3885)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovm2w))]
@@ -8471,7 +8471,7 @@ pub unsafe fn _mm256_movm_epi16(k: __mmask16) -> __m256i {
/// Set each packed 16-bit integer in dst to all ones or all zeros based on the value of the corresponding bit in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movm_epi16&expand=3884)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movm_epi16&expand=3884)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovm2w))]
@@ -8501,7 +8501,7 @@ pub unsafe fn _mm_movm_epi16(k: __mmask8) -> __m128i {
/// Set each packed 8-bit integer in dst to all ones or all zeros based on the value of the corresponding bit in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_movm_epi8&expand=3895)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movm_epi8&expand=3895)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovm2b))]
@@ -8515,7 +8515,7 @@ pub unsafe fn _mm512_movm_epi8(k: __mmask64) -> __m512i {
/// Set each packed 8-bit integer in dst to all ones or all zeros based on the value of the corresponding bit in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movm_epi8&expand=3894)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_movm_epi8&expand=3894)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovm2b))]
@@ -8529,7 +8529,7 @@ pub unsafe fn _mm256_movm_epi8(k: __mmask32) -> __m256i {
/// Set each packed 8-bit integer in dst to all ones or all zeros based on the value of the corresponding bit in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movm_epi8&expand=3893)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movm_epi8&expand=3893)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovm2b))]
@@ -8542,7 +8542,7 @@ pub unsafe fn _mm_movm_epi8(k: __mmask16) -> __m128i {
/// Add 32-bit masks in a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kadd_mask32&expand=3207)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kadd_mask32&expand=3207)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
@@ -8551,7 +8551,7 @@ pub unsafe fn _kadd_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
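// Mask addition is ordinary integer addition of the underlying registers
// (hypothetical demo fn, AVX-512BW assumed):
#[target_feature(enable = "avx512bw")]
unsafe fn kadd_mask32_demo() {
    use std::arch::x86_64::*;
    assert_eq!(_kadd_mask32(0b0101, 0b0011), 0b1000);
}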
/// Add 64-bit masks in a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kadd_mask64&expand=3208)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kadd_mask64&expand=3208)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
@@ -8560,7 +8560,7 @@ pub unsafe fn _kadd_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// Compute the bitwise AND of 32-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kand_mask32&expand=3213)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kand_mask32&expand=3213)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
@@ -8569,7 +8569,7 @@ pub unsafe fn _kand_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// Compute the bitwise AND of 64-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kand_mask64&expand=3214)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kand_mask64&expand=3214)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
@@ -8578,7 +8578,7 @@ pub unsafe fn _kand_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// Compute the bitwise NOT of 32-bit mask a, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_knot_mask32&expand=3234)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_knot_mask32&expand=3234)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _knot_mask32(a: __mmask32) -> __mmask32 {
@@ -8587,7 +8587,7 @@ pub unsafe fn _knot_mask32(a: __mmask32) -> __mmask32 {
/// Compute the bitwise NOT of 64-bit mask a, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_knot_mask64&expand=3235)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_knot_mask64&expand=3235)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 {
@@ -8596,7 +8596,7 @@ pub unsafe fn _knot_mask64(a: __mmask64) -> __mmask64 {
/// Compute the bitwise NOT of 32-bit masks a and then AND with b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kandn_mask32&expand=3219)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kandn_mask32&expand=3219)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
@@ -8605,7 +8605,7 @@ pub unsafe fn _kandn_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// Compute the bitwise NOT of 64-bit masks a and then AND with b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kandn_mask64&expand=3220)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kandn_mask64&expand=3220)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
@@ -8614,7 +8614,7 @@ pub unsafe fn _kandn_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// Compute the bitwise OR of 32-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kor_mask32&expand=3240)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kor_mask32&expand=3240)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
@@ -8623,7 +8623,7 @@ pub unsafe fn _kor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// Compute the bitwise OR of 64-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kor_mask64&expand=3241)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kor_mask64&expand=3241)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
@@ -8632,7 +8632,7 @@ pub unsafe fn _kor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// Compute the bitwise XOR of 32-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxor_mask32&expand=3292)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxor_mask32&expand=3292)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
@@ -8641,7 +8641,7 @@ pub unsafe fn _kxor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// Compute the bitwise XOR of 64-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxor_mask64&expand=3293)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxor_mask64&expand=3293)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
@@ -8650,7 +8650,7 @@ pub unsafe fn _kxor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// Compute the bitwise XNOR of 32-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxnor_mask32&expand=3286)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxnor_mask32&expand=3286)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
@@ -8659,7 +8659,7 @@ pub unsafe fn _kxnor_mask32(a: __mmask32, b: __mmask32) -> __mmask32 {
/// Compute the bitwise XNOR of 64-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_kxnor_mask64&expand=3287)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_kxnor_mask64&expand=3287)
#[inline]
#[target_feature(enable = "avx512bw")]
pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
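For reference, these k-register intrinsics behave like ordinary bitwise operations on plain integer mask values, so they compose freely. A minimal usage sketch, not part of the patch; the helper name is hypothetical and an avx512bw-capable nightly target is assumed:

use core::arch::x86_64::*;

// Keep the lanes selected by either input mask, restricted to even lanes.
// __mmask64 is a plain 64-bit integer, so mask literals work directly.
#[target_feature(enable = "avx512bw")]
unsafe fn even_lanes_of_union(a: __mmask64, b: __mmask64) -> __mmask64 {
    let even: __mmask64 = 0x5555_5555_5555_5555;
    _kand_mask64(_kor_mask64(a, b), even)
}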
@@ -8668,7 +8668,7 @@ pub unsafe fn _kxnor_mask64(a: __mmask64, b: __mmask64) -> __mmask64 {
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi16_epi8&expand=1407)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi16_epi8&expand=1407)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8679,7 +8679,7 @@ pub unsafe fn _mm512_cvtepi16_epi8(a: __m512i) -> __m256i {
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi16_epi8&expand=1408)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_epi8&expand=1408)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8690,7 +8690,7 @@ pub unsafe fn _mm512_mask_cvtepi16_epi8(src: __m256i, k: __mmask32, a: __m512i)
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi16_epi8&expand=1409)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi16_epi8&expand=1409)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8705,7 +8705,7 @@ pub unsafe fn _mm512_maskz_cvtepi16_epi8(k: __mmask32, a: __m512i) -> __m256i {
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_epi8&expand=1404)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi16_epi8&expand=1404)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8716,7 +8716,7 @@ pub unsafe fn _mm256_cvtepi16_epi8(a: __m256i) -> __m128i {
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_epi8&expand=1405)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_epi8&expand=1405)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8727,7 +8727,7 @@ pub unsafe fn _mm256_mask_cvtepi16_epi8(src: __m128i, k: __mmask16, a: __m256i)
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_epi8&expand=1406)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi16_epi8&expand=1406)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8742,20 +8742,20 @@ pub unsafe fn _mm256_maskz_cvtepi16_epi8(k: __mmask16, a: __m256i) -> __m128i {
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_epi8&expand=1401)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi8&expand=1401)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm_cvtepi16_epi8(a: __m128i) -> __m128i {
let a = a.as_i16x8();
let zero = _mm_setzero_si128().as_i16x8();
- let v256: i16x16 = simd_shuffle16!(a, zero, [0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8]);
+ let v256: i16x16 = simd_shuffle!(a, zero, [0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8]);
transmute::<i8x16, _>(simd_cast(v256))
}
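The body above pads the eight input words with zeros to sixteen lanes so that simd_cast can narrow lane-wise into an i8x16; the upper eight bytes of the result are therefore always zero. A hedged usage sketch (hypothetical helper; avx512bw and avx512vl assumed):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn narrow_words(a: __m128i) -> __m128i {
    // low 8 bytes: the truncated words; high 8 bytes: zero
    _mm_cvtepi16_epi8(a)
}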
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_epi8&expand=1402)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_epi8&expand=1402)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8767,7 +8767,7 @@ pub unsafe fn _mm_mask_cvtepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_epi8&expand=1403)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi16_epi8&expand=1403)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -8780,7 +8780,7 @@ pub unsafe fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi16_epi8&expand=1807)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi16_epi8&expand=1807)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8794,7 +8794,7 @@ pub unsafe fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i {
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi16_epi8&expand=1808)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi16_epi8&expand=1808)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8804,7 +8804,7 @@ pub unsafe fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i)
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtsepi16_epi8&expand=1809)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi16_epi8&expand=1809)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8818,7 +8818,7 @@ pub unsafe fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i {
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsepi16_epi8&expand=1804)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi16_epi8&expand=1804)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8832,7 +8832,7 @@ pub unsafe fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i {
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi16_epi8&expand=1805)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi16_epi8&expand=1805)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8842,7 +8842,7 @@ pub unsafe fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i)
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtsepi16_epi8&expand=1806)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi16_epi8&expand=1806)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8856,7 +8856,7 @@ pub unsafe fn _mm256_maskz_cvtsepi16_epi8(k: __mmask16, a: __m256i) -> __m128i {
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsepi16_epi8&expand=1801)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi16_epi8&expand=1801)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8870,7 +8870,7 @@ pub unsafe fn _mm_cvtsepi16_epi8(a: __m128i) -> __m128i {
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi16_epi8&expand=1802)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi16_epi8&expand=1802)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8880,7 +8880,7 @@ pub unsafe fn _mm_mask_cvtsepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsepi16_epi8&expand=1803)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi16_epi8&expand=1803)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -8890,7 +8890,7 @@ pub unsafe fn _mm_maskz_cvtsepi16_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtusepi16_epi8&expand=2042)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi16_epi8&expand=2042)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8904,7 +8904,7 @@ pub unsafe fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i {
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi16_epi8&expand=2043)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi16_epi8&expand=2043)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8914,7 +8914,7 @@ pub unsafe fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtusepi16_epi8&expand=2044)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi16_epi8&expand=2044)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8928,7 +8928,7 @@ pub unsafe fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtusepi16_epi8&expand=2039)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi16_epi8&expand=2039)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8942,7 +8942,7 @@ pub unsafe fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i {
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi16_epi8&expand=2040)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi16_epi8&expand=2040)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8952,7 +8952,7 @@ pub unsafe fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtusepi16_epi8&expand=2041)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi16_epi8&expand=2041)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8966,7 +8966,7 @@ pub unsafe fn _mm256_maskz_cvtusepi16_epi8(k: __mmask16, a: __m256i) -> __m128i
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtusepi16_epi8&expand=2036)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi16_epi8&expand=2036)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8980,7 +8980,7 @@ pub unsafe fn _mm_cvtusepi16_epi8(a: __m128i) -> __m128i {
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi16_epi8&expand=2037)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi16_epi8&expand=2037)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -8990,7 +8990,7 @@ pub unsafe fn _mm_mask_cvtusepi16_epi8(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtusepi16_epi8&expand=2038)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi16_epi8&expand=2038)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
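The three 16-to-8 conversions above differ only in how out-of-range words are narrowed: for a word holding 300, plain truncation yields 44 (300 as i8), signed saturation yields 127, and unsigned saturation yields 255. A sketch for comparison, not part of the patch (hypothetical helper; avx512bw and avx512vl assumed):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn narrow_all_three(a: __m128i) -> (__m128i, __m128i, __m128i) {
    (
        _mm_cvtepi16_epi8(a),   // truncate: 300 -> 44
        _mm_cvtsepi16_epi8(a),  // signed saturate: 300 -> 127
        _mm_cvtusepi16_epi8(a), // unsigned saturate: 300 -> 255
    )
}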
@@ -9004,7 +9004,7 @@ pub unsafe fn _mm_maskz_cvtusepi16_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi8_epi16&expand=1526)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi8_epi16&expand=1526)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9015,7 +9015,7 @@ pub unsafe fn _mm512_cvtepi8_epi16(a: __m256i) -> __m512i {
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi8_epi16&expand=1527)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi8_epi16&expand=1527)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9026,7 +9026,7 @@ pub unsafe fn _mm512_mask_cvtepi8_epi16(src: __m512i, k: __mmask32, a: __m256i)
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi8_epi16&expand=1528)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi8_epi16&expand=1528)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9041,7 +9041,7 @@ pub unsafe fn _mm512_maskz_cvtepi8_epi16(k: __mmask32, a: __m256i) -> __m512i {
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi8_epi16&expand=1524)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi8_epi16&expand=1524)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9052,7 +9052,7 @@ pub unsafe fn _mm256_mask_cvtepi8_epi16(src: __m256i, k: __mmask16, a: __m128i)
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi8_epi16&expand=1525)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi8_epi16&expand=1525)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9067,7 +9067,7 @@ pub unsafe fn _mm256_maskz_cvtepi8_epi16(k: __mmask16, a: __m128i) -> __m256i {
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi8_epi16&expand=1521)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi8_epi16&expand=1521)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9078,7 +9078,7 @@ pub unsafe fn _mm_mask_cvtepi8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi8_epi16&expand=1522)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi8_epi16&expand=1522)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbw))]
@@ -9093,7 +9093,7 @@ pub unsafe fn _mm_maskz_cvtepi8_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu8_epi16&expand=1612)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu8_epi16&expand=1612)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -9104,7 +9104,7 @@ pub unsafe fn _mm512_cvtepu8_epi16(a: __m256i) -> __m512i {
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu8_epi16&expand=1613)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu8_epi16&expand=1613)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -9115,7 +9115,7 @@ pub unsafe fn _mm512_mask_cvtepu8_epi16(src: __m512i, k: __mmask32, a: __m256i)
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu8_epi16&expand=1614)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu8_epi16&expand=1614)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -9130,7 +9130,7 @@ pub unsafe fn _mm512_maskz_cvtepu8_epi16(k: __mmask32, a: __m256i) -> __m512i {
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu8_epi16&expand=1610)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu8_epi16&expand=1610)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -9141,7 +9141,7 @@ pub unsafe fn _mm256_mask_cvtepu8_epi16(src: __m256i, k: __mmask16, a: __m128i)
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu8_epi16&expand=1611)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu8_epi16&expand=1611)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -9156,7 +9156,7 @@ pub unsafe fn _mm256_maskz_cvtepu8_epi16(k: __mmask16, a: __m128i) -> __m256i {
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu8_epi16&expand=1607)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu8_epi16&expand=1607)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
@@ -9167,7 +9167,7 @@ pub unsafe fn _mm_mask_cvtepu8_epi16(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu8_epi16&expand=1608)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu8_epi16&expand=1608)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbw))]
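Sign extension replicates each source byte's sign bit while zero extension always fills with zeros, so the byte 0x80 widens to -128 one way and to 128 the other. An illustrative sketch (hypothetical helper; avx512bw assumed):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw")]
unsafe fn widen_both_ways(a: __m256i) -> (__m512i, __m512i) {
    (
        _mm512_cvtepi8_epi16(a), // sign extend: 0x80u8 -> -128i16
        _mm512_cvtepu8_epi16(a), // zero extend: 0x80u8 -> 128i16
    )
}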
@@ -9182,13 +9182,13 @@ pub unsafe fn _mm_maskz_cvtepu8_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Shift 128-bit lanes in a left by imm8 bytes while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_bslli_epi128&expand=591)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_bslli_epi128&expand=591)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpslldq, IMM8 = 3))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
const fn mask(shift: i32, i: u32) -> u32 {
let shift = shift as u32 & 0xff;
if shift > 15 || i % 16 < shift {
@@ -9199,10 +9199,10 @@ pub unsafe fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
}
let a = a.as_i8x64();
let zero = _mm512_setzero_si512().as_i8x64();
- let r: i8x64 = simd_shuffle64!(
+ let r: i8x64 = simd_shuffle!(
zero,
a,
- <const IMM8: i32> [
+ [
mask(IMM8, 0),
mask(IMM8, 1),
mask(IMM8, 2),
@@ -9274,17 +9274,17 @@ pub unsafe fn _mm512_bslli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
/// Shift 128-bit lanes in a right by imm8 bytes while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_bsrli_epi128&expand=594)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_bsrli_epi128&expand=594)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpsrldq, IMM8 = 3))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i8x64();
let zero = _mm512_setzero_si512().as_i8x64();
let r: i8x64 = match IMM8 % 16 {
- 0 => simd_shuffle64!(
+ 0 => simd_shuffle!(
a,
zero,
[
@@ -9293,7 +9293,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
],
),
- 1 => simd_shuffle64!(
+ 1 => simd_shuffle!(
a,
zero,
[
@@ -9302,7 +9302,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
45, 46, 47, 96, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 112,
],
),
- 2 => simd_shuffle64!(
+ 2 => simd_shuffle!(
a,
zero,
[
@@ -9311,7 +9311,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
46, 47, 96, 97, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 112, 113,
],
),
- 3 => simd_shuffle64!(
+ 3 => simd_shuffle!(
a,
zero,
[
@@ -9321,7 +9321,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
114,
],
),
- 4 => simd_shuffle64!(
+ 4 => simd_shuffle!(
a,
zero,
[
@@ -9331,7 +9331,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
115,
],
),
- 5 => simd_shuffle64!(
+ 5 => simd_shuffle!(
a,
zero,
[
@@ -9341,7 +9341,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
115, 116,
],
),
- 6 => simd_shuffle64!(
+ 6 => simd_shuffle!(
a,
zero,
[
@@ -9351,7 +9351,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
116, 117,
],
),
- 7 => simd_shuffle64!(
+ 7 => simd_shuffle!(
a,
zero,
[
@@ -9361,7 +9361,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
116, 117, 118,
],
),
- 8 => simd_shuffle64!(
+ 8 => simd_shuffle!(
a,
zero,
[
@@ -9371,7 +9371,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
116, 117, 118, 119,
],
),
- 9 => simd_shuffle64!(
+ 9 => simd_shuffle!(
a,
zero,
[
@@ -9381,7 +9381,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
117, 118, 119, 120,
],
),
- 10 => simd_shuffle64!(
+ 10 => simd_shuffle!(
a,
zero,
[
@@ -9391,7 +9391,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
118, 119, 120, 121,
],
),
- 11 => simd_shuffle64!(
+ 11 => simd_shuffle!(
a,
zero,
[
@@ -9401,7 +9401,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
117, 118, 119, 120, 121, 122,
],
),
- 12 => simd_shuffle64!(
+ 12 => simd_shuffle!(
a,
zero,
[
@@ -9411,7 +9411,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
118, 119, 120, 121, 122, 123,
],
),
- 13 => simd_shuffle64!(
+ 13 => simd_shuffle!(
a,
zero,
[
@@ -9421,7 +9421,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
119, 120, 121, 122, 123, 124,
],
),
- 14 => simd_shuffle64!(
+ 14 => simd_shuffle!(
a,
zero,
[
@@ -9431,7 +9431,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
120, 121, 122, 123, 124, 125,
],
),
- 15 => simd_shuffle64!(
+ 15 => simd_shuffle!(
a,
zero,
[
@@ -9448,7 +9448,7 @@ pub unsafe fn _mm512_bsrli_epi128<const IMM8: i32>(a: __m512i) -> __m512i {
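These byte shifts take IMM8 in bytes, not bits, and each 128-bit lane of the vector shifts independently, which is why the shuffle tables above are built per 16-byte lane. An illustrative sketch (hypothetical helper; avx512bw assumed):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw")]
unsafe fn shift_each_lane_by_two_bytes(a: __m512i) -> (__m512i, __m512i) {
    (_mm512_bslli_epi128::<2>(a), _mm512_bsrli_epi128::<2>(a))
}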
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_alignr_epi8&expand=263)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_alignr_epi8&expand=263)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
@@ -9470,7 +9470,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
let b = b.as_i8x64();
let r: i8x64 = match IMM8 % 16 {
- 0 => simd_shuffle64!(
+ 0 => simd_shuffle!(
b,
a,
[
@@ -9479,7 +9479,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
],
),
- 1 => simd_shuffle64!(
+ 1 => simd_shuffle!(
b,
a,
[
@@ -9488,7 +9488,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
45, 46, 47, 96, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 112,
],
),
- 2 => simd_shuffle64!(
+ 2 => simd_shuffle!(
b,
a,
[
@@ -9497,7 +9497,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
46, 47, 96, 97, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 112, 113,
],
),
- 3 => simd_shuffle64!(
+ 3 => simd_shuffle!(
b,
a,
[
@@ -9507,7 +9507,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
114,
],
),
- 4 => simd_shuffle64!(
+ 4 => simd_shuffle!(
b,
a,
[
@@ -9517,7 +9517,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
115,
],
),
- 5 => simd_shuffle64!(
+ 5 => simd_shuffle!(
b,
a,
[
@@ -9527,7 +9527,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
115, 116,
],
),
- 6 => simd_shuffle64!(
+ 6 => simd_shuffle!(
b,
a,
[
@@ -9537,7 +9537,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
116, 117,
],
),
- 7 => simd_shuffle64!(
+ 7 => simd_shuffle!(
b,
a,
[
@@ -9547,7 +9547,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
116, 117, 118,
],
),
- 8 => simd_shuffle64!(
+ 8 => simd_shuffle!(
b,
a,
[
@@ -9557,7 +9557,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
116, 117, 118, 119,
],
),
- 9 => simd_shuffle64!(
+ 9 => simd_shuffle!(
b,
a,
[
@@ -9567,7 +9567,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
117, 118, 119, 120,
],
),
- 10 => simd_shuffle64!(
+ 10 => simd_shuffle!(
b,
a,
[
@@ -9577,7 +9577,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
118, 119, 120, 121,
],
),
- 11 => simd_shuffle64!(
+ 11 => simd_shuffle!(
b,
a,
[
@@ -9587,7 +9587,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
117, 118, 119, 120, 121, 122,
],
),
- 12 => simd_shuffle64!(
+ 12 => simd_shuffle!(
b,
a,
[
@@ -9597,7 +9597,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
118, 119, 120, 121, 122, 123,
],
),
- 13 => simd_shuffle64!(
+ 13 => simd_shuffle!(
b,
a,
[
@@ -9607,7 +9607,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
119, 120, 121, 122, 123, 124,
],
),
- 14 => simd_shuffle64!(
+ 14 => simd_shuffle!(
b,
a,
[
@@ -9617,7 +9617,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
120, 121, 122, 123, 124, 125,
],
),
- 15 => simd_shuffle64!(
+ 15 => simd_shuffle!(
b,
a,
[
@@ -9634,7 +9634,7 @@ pub unsafe fn _mm512_alignr_epi8<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_alignr_epi8&expand=264)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_alignr_epi8&expand=264)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
@@ -9645,14 +9645,14 @@ pub unsafe fn _mm512_mask_alignr_epi8<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_alignr_epi8::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i8x64(), src.as_i8x64()))
}
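Per 128-bit lane, alignr concatenates the lane of a above the lane of b and takes 16 bytes starting IMM8 bytes in, so IMM8 == 0 returns b and IMM8 == 16 returns a; the masked form then selects between that result and src. A usage sketch, not part of the patch (hypothetical helper; avx512bw assumed):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw")]
unsafe fn shifted_merge(src: __m512i, k: __mmask64, a: __m512i, b: __m512i) -> __m512i {
    // per lane: bytes 3..16 of b, then bytes 0..3 of a; src where k is clear
    _mm512_mask_alignr_epi8::<3>(src, k, a, b)
}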
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_alignr_epi8&expand=265)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_alignr_epi8&expand=265)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))]
@@ -9662,7 +9662,7 @@ pub unsafe fn _mm512_maskz_alignr_epi8<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_alignr_epi8::<IMM8>(a, b);
let zero = _mm512_setzero_si512().as_i8x64();
transmute(simd_select_bitmask(k, r.as_i8x64(), zero))
@@ -9670,7 +9670,7 @@ pub unsafe fn _mm512_maskz_alignr_epi8<const IMM8: i32>(
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_alignr_epi8&expand=261)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_alignr_epi8&expand=261)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(4)]
@@ -9681,14 +9681,14 @@ pub unsafe fn _mm256_mask_alignr_epi8<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm256_alignr_epi8::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i8x32(), src.as_i8x32()))
}
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_alignr_epi8&expand=262)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_alignr_epi8&expand=262)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -9698,7 +9698,7 @@ pub unsafe fn _mm256_maskz_alignr_epi8<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm256_alignr_epi8::<IMM8>(a, b);
transmute(simd_select_bitmask(
k,
@@ -9709,7 +9709,7 @@ pub unsafe fn _mm256_maskz_alignr_epi8<const IMM8: i32>(
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_alignr_epi8&expand=258)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_alignr_epi8&expand=258)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(4)]
@@ -9720,14 +9720,14 @@ pub unsafe fn _mm_mask_alignr_epi8<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi8::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i8x16(), src.as_i8x16()))
}
/// Concatenate pairs of 16-byte blocks in a and b into a 32-byte temporary result, shift the result right by imm8 bytes, and store the low 16 bytes in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_alignr_epi8&expand=259)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_alignr_epi8&expand=259)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -9737,7 +9737,7 @@ pub unsafe fn _mm_maskz_alignr_epi8<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi8::<IMM8>(a, b);
let zero = _mm_setzero_si128().as_i8x16();
transmute(simd_select_bitmask(k, r.as_i8x16(), zero))
@@ -9745,7 +9745,7 @@ pub unsafe fn _mm_maskz_alignr_epi8<const IMM8: i32>(
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi16_storeu_epi8&expand=1812)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi16_storeu_epi8&expand=1812)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -9755,7 +9755,7 @@ pub unsafe fn _mm512_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32,
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi16_storeu_epi8&expand=1811)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi16_storeu_epi8&expand=1811)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -9765,7 +9765,7 @@ pub unsafe fn _mm256_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi16_storeu_epi8&expand=1810)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi16_storeu_epi8&expand=1810)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovswb))]
@@ -9775,7 +9775,7 @@ pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi16_storeu_epi8&expand=1412)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_storeu_epi8&expand=1412)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -9785,7 +9785,7 @@ pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32,
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_storeu_epi8&expand=1411)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_storeu_epi8&expand=1411)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -9795,7 +9795,7 @@ pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_storeu_epi8&expand=1410)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_storeu_epi8&expand=1410)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovwb))]
@@ -9805,7 +9805,7 @@ pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi16_storeu_epi8&expand=2047)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi16_storeu_epi8&expand=2047)
#[inline]
#[target_feature(enable = "avx512bw")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -9815,7 +9815,7 @@ pub unsafe fn _mm512_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi16_storeu_epi8&expand=2046)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi16_storeu_epi8&expand=2046)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
@@ -9825,7 +9825,7 @@ pub unsafe fn _mm256_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi16_storeu_epi8&expand=2045)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi16_storeu_epi8&expand=2045)
#[inline]
#[target_feature(enable = "avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
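Unlike the register-to-register conversions earlier in the file, these storeu variants narrow and then write only the bytes whose mask bit is set, leaving the rest of the destination untouched. A hedged sketch (hypothetical helper; assumes dst points to at least 16 writable bytes and avx512bw support):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512bw")]
unsafe fn store_low_half_saturated(dst: *mut i8, a: __m512i) {
    // write only the first 16 of the 32 narrowed bytes
    _mm512_mask_cvtsepi16_storeu_epi8(dst, 0x0000_ffff, a);
}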
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512cd.rs b/library/stdarch/crates/core_arch/src/x86/avx512cd.rs
index ac9d3aed3..a54b54763 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512cd.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512cd.rs
@@ -8,7 +8,7 @@ use stdarch_test::assert_instr;
/// Broadcast the low 16 bits from input mask k to all 32-bit elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastmw_epi32&expand=553)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastmw_epi32&expand=553)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d
@@ -18,7 +18,7 @@ pub unsafe fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i {
/// Broadcast the low 16 bits from input mask k to all 32-bit elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastmw_epi32&expand=552)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastmw_epi32&expand=552)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d
@@ -28,7 +28,7 @@ pub unsafe fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i {
/// Broadcast the low 16 bits from input mask k to all 32-bit elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastmw_epi32&expand=551)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastmw_epi32&expand=551)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d
@@ -38,7 +38,7 @@ pub unsafe fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i {
/// Broadcast the low 8 bits from input mask k to all 64-bit elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastmb_epi64&expand=550)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastmb_epi64&expand=550)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q
@@ -48,7 +48,7 @@ pub unsafe fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i {
/// Broadcast the low 8 bits from input mask k to all 64-bit elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcastmb_epi64&expand=549)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastmb_epi64&expand=549)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q
@@ -58,7 +58,7 @@ pub unsafe fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i {
/// Broadcast the low 8 bits from input mask k to all 64-bit elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcastmb_epi64&expand=548)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastmb_epi64&expand=548)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q
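The broadcastm intrinsics splat a mask register's low bits into every element, which is handy for turning a compare mask back into a vector operand. An illustrative sketch (hypothetical helper; avx512cd assumed):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512cd")]
unsafe fn mask_as_vector(k: __mmask16) -> __m512i {
    // each of the 16 dwords now holds k zero-extended to 32 bits
    _mm512_broadcastmw_epi32(k)
}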
@@ -68,7 +68,7 @@ pub unsafe fn _mm_broadcastmb_epi64(k: __mmask8) -> __m128i {
/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_conflict_epi32&expand=1248)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_conflict_epi32&expand=1248)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -78,7 +78,7 @@ pub unsafe fn _mm512_conflict_epi32(a: __m512i) -> __m512i {
/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_conflict_epi32&expand=1249)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_conflict_epi32&expand=1249)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -89,7 +89,7 @@ pub unsafe fn _mm512_mask_conflict_epi32(src: __m512i, k: __mmask16, a: __m512i)
/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conflict_epi32&expand=1250)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_conflict_epi32&expand=1250)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -101,7 +101,7 @@ pub unsafe fn _mm512_maskz_conflict_epi32(k: __mmask16, a: __m512i) -> __m512i {
/// Test each 32-bit element of a for equality with all other elements in a that are closer to the least significant bit. Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conflict_epi32&expand=1245)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_conflict_epi32&expand=1245)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -111,7 +111,7 @@ pub unsafe fn _mm256_conflict_epi32(a: __m256i) -> __m256i {
/// Test each 32-bit element of a for equality with all other elements in a that are closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_conflict_epi32&expand=1246)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_conflict_epi32&expand=1246)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -122,7 +122,7 @@ pub unsafe fn _mm256_mask_conflict_epi32(src: __m256i, k: __mmask8, a: __m256i)
/// Test each 32-bit element of a for equality with all other elements in a that are closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conflict_epi32&expand=1247)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_conflict_epi32&expand=1247)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -134,7 +134,7 @@ pub unsafe fn _mm256_maskz_conflict_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Test each 32-bit element of a for equality with all other elements in a that are closer to the least significant bit. Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conflict_epi32&expand=1242)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_conflict_epi32&expand=1242)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -144,7 +144,7 @@ pub unsafe fn _mm_conflict_epi32(a: __m128i) -> __m128i {
/// Test each 32-bit element of a for equality with all other elements in a that are closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_conflict_epi32&expand=1243)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_conflict_epi32&expand=1243)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictd))]
@@ -155,7 +155,7 @@ pub unsafe fn _mm_mask_conflict_epi32(src: __m128i, k: __mmask8, a: __m128i) ->
/// Test each 32-bit element of a for equality with all other elements in a that are closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conflict_epi32&expand=1244)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_conflict_epi32&expand=1244)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictd))]
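To make the "conflict" wording concrete: lane i of the result gets bit j set exactly when lane j (j < i) holds the same value. A minimal sketch under the same AVX512CD assumptions; the function name and lane values are illustrative.

#[cfg(target_arch = "x86_64")]
fn conflict_epi32_demo() {
    if !is_x86_feature_detected!("avx512cd") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        // Four distinct values, each repeated four times across the 16 lanes
        // (note _mm512_set_epi32 lists lanes from highest to lowest).
        let a = _mm512_set_epi32(3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
        let c = _mm512_conflict_epi32(a);
        let lanes: [i32; 16] = core::mem::transmute(c);
        assert_eq!(lanes[0], 0);      // no lower lane matches lane 0
        assert_eq!(lanes[4], 1 << 0); // lane 4 repeats lane 0's value
        assert_eq!(lanes[8], (1 << 0) | (1 << 4)); // lanes 0 and 4 match lane 8
    }
}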
@@ -167,7 +167,7 @@ pub unsafe fn _mm_maskz_conflict_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit. Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_conflict_epi64&expand=1257)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_conflict_epi64&expand=1257)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -177,7 +177,7 @@ pub unsafe fn _mm512_conflict_epi64(a: __m512i) -> __m512i {
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_conflict_epi64&expand=1258)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_conflict_epi64&expand=1258)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -188,7 +188,7 @@ pub unsafe fn _mm512_mask_conflict_epi64(src: __m512i, k: __mmask8, a: __m512i)
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conflict_epi64&expand=1259)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_conflict_epi64&expand=1259)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -200,7 +200,7 @@ pub unsafe fn _mm512_maskz_conflict_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit. Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conflict_epi64&expand=1254)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_conflict_epi64&expand=1254)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -210,7 +210,7 @@ pub unsafe fn _mm256_conflict_epi64(a: __m256i) -> __m256i {
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_conflict_epi64&expand=1255)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_conflict_epi64&expand=1255)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -221,7 +221,7 @@ pub unsafe fn _mm256_mask_conflict_epi64(src: __m256i, k: __mmask8, a: __m256i)
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conflict_epi64&expand=1256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_conflict_epi64&expand=1256)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -233,7 +233,7 @@ pub unsafe fn _mm256_maskz_conflict_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit. Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conflict_epi64&expand=1251)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_conflict_epi64&expand=1251)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -243,7 +243,7 @@ pub unsafe fn _mm_conflict_epi64(a: __m128i) -> __m128i {
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_conflict_epi64&expand=1252)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_conflict_epi64&expand=1252)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictq))]
@@ -254,7 +254,7 @@ pub unsafe fn _mm_mask_conflict_epi64(src: __m128i, k: __mmask8, a: __m128i) ->
/// Test each 64-bit element of a for equality with all other elements in a that are closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero-extended bit vector in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conflict_epi64&expand=1253)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_conflict_epi64&expand=1253)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vpconflictq))]
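The masked forms follow the writemask/zeromask convention spelled out in each doc comment; a hedged sketch of the 64-bit writemask variant, under the same AVX512CD assumptions:

#[cfg(target_arch = "x86_64")]
fn mask_conflict_epi64_demo() {
    if !is_x86_feature_detected!("avx512cd") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let a = _mm512_set1_epi64(42);  // all eight lanes hold the same value
        let src = _mm512_set1_epi64(-1);
        let k: __mmask8 = 0b0000_0011;  // compute only lanes 0 and 1
        let c = _mm512_mask_conflict_epi64(src, k, a);
        let lanes: [i64; 8] = core::mem::transmute(c);
        assert_eq!(lanes[0], 0);   // no lower lane to conflict with
        assert_eq!(lanes[1], 0b1); // lane 1 matches lane 0
        assert_eq!(lanes[2], -1);  // mask bit clear: copied from src
    }
}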
@@ -266,7 +266,7 @@ pub unsafe fn _mm_maskz_conflict_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_lzcnt_epi32&expand=3491)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_lzcnt_epi32&expand=3491)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -276,7 +276,7 @@ pub unsafe fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i {
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_lzcnt_epi32&expand=3492)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_lzcnt_epi32&expand=3492)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -287,7 +287,7 @@ pub unsafe fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) ->
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_lzcnt_epi32&expand=3493)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_lzcnt_epi32&expand=3493)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -299,7 +299,7 @@ pub unsafe fn _mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i {
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_lzcnt_epi32&expand=3488)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lzcnt_epi32&expand=3488)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -309,7 +309,7 @@ pub unsafe fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i {
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_lzcnt_epi32&expand=3489)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_lzcnt_epi32&expand=3489)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -320,7 +320,7 @@ pub unsafe fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) ->
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_lzcnt_epi32&expand=3490)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_lzcnt_epi32&expand=3490)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -332,7 +332,7 @@ pub unsafe fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lzcnt_epi32&expand=3485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lzcnt_epi32&expand=3485)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -342,7 +342,7 @@ pub unsafe fn _mm_lzcnt_epi32(a: __m128i) -> __m128i {
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_lzcnt_epi32&expand=3486)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_lzcnt_epi32&expand=3486)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntd))]
@@ -353,7 +353,7 @@ pub unsafe fn _mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m
/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_lzcnt_epi32&expand=3487)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_lzcnt_epi32&expand=3487)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntd))]
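A short sketch of vplzcntd through the zeromask variant, assuming AVX512CD as above; unselected lanes come back as zero rather than a copy of src. Names and values are illustrative.

#[cfg(target_arch = "x86_64")]
fn lzcnt_epi32_demo() {
    if !is_x86_feature_detected!("avx512cd") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let a = _mm512_set1_epi32(1);             // 31 leading zeros per lane
        let k: __mmask16 = 0b0000_0000_0000_0001; // keep only lane 0
        let r = _mm512_maskz_lzcnt_epi32(k, a);
        let lanes: [i32; 16] = core::mem::transmute(r);
        assert_eq!(lanes[0], 31);
        assert!(lanes[1..].iter().all(|&l| l == 0)); // zeroed by the mask
    }
}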
@@ -365,7 +365,7 @@ pub unsafe fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_lzcnt_epi64&expand=3500)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_lzcnt_epi64&expand=3500)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -375,7 +375,7 @@ pub unsafe fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i {
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_lzcnt_epi64&expand=3501)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_lzcnt_epi64&expand=3501)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -386,7 +386,7 @@ pub unsafe fn _mm512_mask_lzcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) ->
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_lzcnt_epi64&expand=3502)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_lzcnt_epi64&expand=3502)
#[inline]
#[target_feature(enable = "avx512cd")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -398,7 +398,7 @@ pub unsafe fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_lzcnt_epi64&expand=3497)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lzcnt_epi64&expand=3497)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -408,7 +408,7 @@ pub unsafe fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i {
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_lzcnt_epi64&expand=3498)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_lzcnt_epi64&expand=3498)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -419,7 +419,7 @@ pub unsafe fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) ->
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_lzcnt_epi64&expand=3499)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_lzcnt_epi64&expand=3499)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -431,7 +431,7 @@ pub unsafe fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lzcnt_epi64&expand=3494)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lzcnt_epi64&expand=3494)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -441,7 +441,7 @@ pub unsafe fn _mm_lzcnt_epi64(a: __m128i) -> __m128i {
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_lzcnt_epi64&expand=3495)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_lzcnt_epi64&expand=3495)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntq))]
@@ -452,7 +452,7 @@ pub unsafe fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m
/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_lzcnt_epi64&expand=3496)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_lzcnt_epi64&expand=3496)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[cfg_attr(test, assert_instr(vplzcntq))]
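For the 64-bit variant, an all-zero input is the edge case worth remembering: the count saturates at the lane width. A hedged sketch under the same assumptions:

#[cfg(target_arch = "x86_64")]
fn lzcnt_epi64_demo() {
    if !is_x86_feature_detected!("avx512cd") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let r = _mm512_lzcnt_epi64(_mm512_setzero_si512());
        let lanes: [i64; 8] = core::mem::transmute(r);
        assert!(lanes.iter().all(|&l| l == 64)); // lzcnt of 0 is the full width
    }
}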
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
index 0ddb51283..e0014f7ed 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs
@@ -41,7 +41,7 @@ use stdarch_test::assert_instr;
/// Computes the absolute values of packed 32-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_epi32&expand=39)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi32&expand=39)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -58,7 +58,7 @@ pub unsafe fn _mm512_abs_epi32(a: __m512i) -> __m512i {
/// unsigned results in `dst` using writemask `k` (elements are copied from
/// `src` when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_abs_epi32&expand=40)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi32&expand=40)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -71,7 +71,7 @@ pub unsafe fn _mm512_mask_abs_epi32(src: __m512i, k: __mmask16, a: __m512i) -> _
/// unsigned results in `dst` using zeromask `k` (elements are zeroed out when
/// the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_abs_epi32&expand=41)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi32&expand=41)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -83,7 +83,7 @@ pub unsafe fn _mm512_maskz_abs_epi32(k: __mmask16, a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 32-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_abs_epi32&expand=37)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi32&expand=37)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -94,7 +94,7 @@ pub unsafe fn _mm256_mask_abs_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __
/// Compute the absolute value of packed signed 32-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_abs_epi32&expand=38)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi32&expand=38)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -106,7 +106,7 @@ pub unsafe fn _mm256_maskz_abs_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Compute the absolute value of packed signed 32-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_abs_epi32&expand=34)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_abs_epi32&expand=34)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsd))]
@@ -117,7 +117,7 @@ pub unsafe fn _mm_mask_abs_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m12
/// Compute the absolute value of packed signed 32-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_abs_epi32&expand=35)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_abs_epi32&expand=35)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsd))]
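A minimal sketch of the writemask convention these avx512f intrinsics share, using _mm512_mask_abs_epi32; it assumes AVX512F support at runtime, and the function name and values are illustrative.

#[cfg(target_arch = "x86_64")]
fn mask_abs_epi32_demo() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let a = _mm512_set1_epi32(-7);
        let src = _mm512_set1_epi32(100);
        let k: __mmask16 = 0x00FF; // select the low eight lanes
        let r = _mm512_mask_abs_epi32(src, k, a);
        let lanes: [i32; 16] = core::mem::transmute(r);
        assert_eq!(lanes[0], 7);    // mask bit set: |a| is computed
        assert_eq!(lanes[15], 100); // mask bit clear: copied from src
    }
}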
@@ -129,7 +129,7 @@ pub unsafe fn _mm_maskz_abs_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Compute the absolute value of packed signed 64-bit integers in a, and store the unsigned results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_epi64&expand=48)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_epi64&expand=48)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpabsq))]
@@ -144,7 +144,7 @@ pub unsafe fn _mm512_abs_epi64(a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 64-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_abs_epi64&expand=49)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_epi64&expand=49)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpabsq))]
@@ -155,7 +155,7 @@ pub unsafe fn _mm512_mask_abs_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __
/// Compute the absolute value of packed signed 64-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_abs_epi64&expand=50)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_abs_epi64&expand=50)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpabsq))]
@@ -167,7 +167,7 @@ pub unsafe fn _mm512_maskz_abs_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Compute the absolute value of packed signed 64-bit integers in a, and store the unsigned results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_epi64&expand=45)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_abs_epi64&expand=45)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsq))]
@@ -182,7 +182,7 @@ pub unsafe fn _mm256_abs_epi64(a: __m256i) -> __m256i {
/// Compute the absolute value of packed signed 64-bit integers in a, and store the unsigned results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_abs_epi64&expand=46)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_abs_epi64&expand=46)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsq))]
@@ -193,7 +193,7 @@ pub unsafe fn _mm256_mask_abs_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __
/// Compute the absolute value of packed signed 64-bit integers in a, and store the unsigned results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_epi64&expand=45)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_abs_epi64&expand=47)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpabsq))]
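The 256-bit form additionally requires AVX512VL, as the target_feature attributes above indicate; a hedged sketch with both features checked at runtime:

#[cfg(target_arch = "x86_64")]
fn abs_epi64_demo() {
    if !(is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl")) {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        // _mm256_set_epi64x lists lanes from highest to lowest.
        let a = _mm256_set_epi64x(-4, -3, 2, -1);
        let r = _mm256_abs_epi64(a);
        let lanes: [i64; 4] = core::mem::transmute(r);
        assert_eq!(lanes, [1, 2, 3, 4]);
    }
}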
@@ -205,7 +205,7 @@ pub unsafe fn _mm256_maskz_abs_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Finds the absolute value of each packed single-precision (32-bit) floating-point element in v2, storing the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_ps&expand=65)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_ps&expand=65)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -218,7 +218,7 @@ pub unsafe fn _mm512_abs_ps(v2: __m512) -> __m512 {
/// Finds the absolute value of each packed single-precision (32-bit) floating-point element in v2, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_abs_ps&expand=66)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_ps&expand=66)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandd))]
@@ -229,7 +229,7 @@ pub unsafe fn _mm512_mask_abs_ps(src: __m512, k: __mmask16, v2: __m512) -> __m51
/// Finds the absolute value of each packed double-precision (64-bit) floating-point element in v2, storing the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_pd&expand=60)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_abs_pd&expand=60)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -242,7 +242,7 @@ pub unsafe fn _mm512_abs_pd(v2: __m512d) -> __m512d {
/// Finds the absolute value of each packed double-precision (64-bit) floating-point element in v2, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_abs_pd&expand=61)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_abs_pd&expand=61)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
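The assert_instr hints above (vpandd/vpandq) reflect that floating-point abs is just a bitwise AND that clears the sign bit. A minimal sketch, assuming AVX512F; name and value are illustrative.

#[cfg(target_arch = "x86_64")]
fn abs_ps_demo() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let v = _mm512_set1_ps(-0.5);
        let r = _mm512_abs_ps(v);
        let lanes: [f32; 16] = core::mem::transmute(r);
        assert!(lanes.iter().all(|&x| x == 0.5)); // sign bit cleared per lane
    }
}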
@@ -253,7 +253,7 @@ pub unsafe fn _mm512_mask_abs_pd(src: __m512d, k: __mmask8, v2: __m512d) -> __m5
/// Move packed 32-bit integers from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mov_epi32&expand=3801)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi32&expand=3801)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovdqa32))]
@@ -264,7 +264,7 @@ pub unsafe fn _mm512_mask_mov_epi32(src: __m512i, k: __mmask16, a: __m512i) -> _
/// Move packed 32-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mov_epi32&expand=3802)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi32&expand=3802)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovdqa32))]
@@ -276,7 +276,7 @@ pub unsafe fn _mm512_maskz_mov_epi32(k: __mmask16, a: __m512i) -> __m512i {
/// Move packed 32-bit integers from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mov_epi32&expand=3799)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi32&expand=3799)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa32))]
@@ -287,7 +287,7 @@ pub unsafe fn _mm256_mask_mov_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __
/// Move packed 32-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mov_epi32&expand=3800)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi32&expand=3800)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa32))]
@@ -299,7 +299,7 @@ pub unsafe fn _mm256_maskz_mov_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Move packed 32-bit integers from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mov_epi32&expand=3797)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi32&expand=3797)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa32))]
@@ -310,7 +310,7 @@ pub unsafe fn _mm_mask_mov_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m12
/// Move packed 32-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mov_epi32&expand=3798)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi32&expand=3798)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa32))]
@@ -322,7 +322,7 @@ pub unsafe fn _mm_maskz_mov_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Move packed 64-bit integers from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mov_epi64&expand=3807)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_epi64&expand=3807)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovdqa64))]
@@ -333,7 +333,7 @@ pub unsafe fn _mm512_mask_mov_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __
/// Move packed 64-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mov_epi64&expand=3808)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_epi64&expand=3808)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovdqa64))]
@@ -345,7 +345,7 @@ pub unsafe fn _mm512_maskz_mov_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Move packed 64-bit integers from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mov_epi64&expand=3805)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_epi64&expand=3805)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa64))]
@@ -356,7 +356,7 @@ pub unsafe fn _mm256_mask_mov_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __
/// Move packed 64-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mov_epi64&expand=3806)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_epi64&expand=3806)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa64))]
@@ -368,7 +368,7 @@ pub unsafe fn _mm256_maskz_mov_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Move packed 64-bit integers from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mov_epi64&expand=3803)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_epi64&expand=3803)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa64))]
@@ -379,7 +379,7 @@ pub unsafe fn _mm_mask_mov_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m12
/// Move packed 64-bit integers from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mov_epi64&expand=3804)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_epi64&expand=3804)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa64))]
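mask_mov is a per-lane select: each destination lane takes a where the mask bit is set and src otherwise, the building block behind branchless blends. A hedged sketch, assuming AVX512F:

#[cfg(target_arch = "x86_64")]
fn mask_mov_epi32_demo() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let src = _mm512_set1_epi32(0);
        let a = _mm512_set1_epi32(9);
        let k: __mmask16 = 0b1010_1010_1010_1010;
        let r = _mm512_mask_mov_epi32(src, k, a);
        let lanes: [i32; 16] = core::mem::transmute(r);
        assert_eq!(lanes[0], 0); // bit 0 of k is clear: src
        assert_eq!(lanes[1], 9); // bit 1 of k is set: a
    }
}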
@@ -391,7 +391,7 @@ pub unsafe fn _mm_maskz_mov_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Move packed single-precision (32-bit) floating-point elements from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mov_ps&expand=3825)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_ps&expand=3825)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -402,7 +402,7 @@ pub unsafe fn _mm512_mask_mov_ps(src: __m512, k: __mmask16, a: __m512) -> __m512
/// Move packed single-precision (32-bit) floating-point elements from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mov_ps&expand=3826)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_ps&expand=3826)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -414,7 +414,7 @@ pub unsafe fn _mm512_maskz_mov_ps(k: __mmask16, a: __m512) -> __m512 {
/// Move packed single-precision (32-bit) floating-point elements from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mov_ps&expand=3823)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_ps&expand=3823)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -425,7 +425,7 @@ pub unsafe fn _mm256_mask_mov_ps(src: __m256, k: __mmask8, a: __m256) -> __m256
/// Move packed single-precision (32-bit) floating-point elements from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mov_ps&expand=3824)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_ps&expand=3824)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -437,7 +437,7 @@ pub unsafe fn _mm256_maskz_mov_ps(k: __mmask8, a: __m256) -> __m256 {
/// Move packed single-precision (32-bit) floating-point elements from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mov_ps&expand=3821)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_ps&expand=3821)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -448,7 +448,7 @@ pub unsafe fn _mm_mask_mov_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 {
/// Move packed single-precision (32-bit) floating-point elements from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mov_ps&expand=3822)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_ps&expand=3822)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -460,7 +460,7 @@ pub unsafe fn _mm_maskz_mov_ps(k: __mmask8, a: __m128) -> __m128 {
/// Move packed double-precision (64-bit) floating-point elements from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mov_pd&expand=3819)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mov_pd&expand=3819)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovapd))]
@@ -471,7 +471,7 @@ pub unsafe fn _mm512_mask_mov_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m51
/// Move packed double-precision (64-bit) floating-point elements from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mov_pd&expand=3820)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mov_pd&expand=3820)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovapd))]
@@ -483,7 +483,7 @@ pub unsafe fn _mm512_maskz_mov_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Move packed double-precision (64-bit) floating-point elements from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mov_pd&expand=3817)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mov_pd&expand=3817)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovapd))]
@@ -494,7 +494,7 @@ pub unsafe fn _mm256_mask_mov_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m25
/// Move packed double-precision (64-bit) floating-point elements from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mov_pd&expand=3818)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mov_pd&expand=3818)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovapd))]
@@ -506,7 +506,7 @@ pub unsafe fn _mm256_maskz_mov_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Move packed double-precision (64-bit) floating-point elements from a to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mov_pd&expand=3815)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mov_pd&expand=3815)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovapd))]
@@ -517,7 +517,7 @@ pub unsafe fn _mm_mask_mov_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d
/// Move packed double-precision (64-bit) floating-point elements from a into dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mov_pd&expand=3816)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mov_pd&expand=3816)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovapd))]
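The same select exists on floats; with the zeromask form, unselected lanes become +0.0 instead of a copy of src. A minimal sketch under the same AVX512F assumption:

#[cfg(target_arch = "x86_64")]
fn maskz_mov_ps_demo() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let a = _mm512_set1_ps(3.0);
        let r = _mm512_maskz_mov_ps(0x0001, a); // keep only lane 0
        let lanes: [f32; 16] = core::mem::transmute(r);
        assert_eq!(lanes[0], 3.0);
        assert_eq!(lanes[1], 0.0); // zeroed, not copied
    }
}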
@@ -529,7 +529,7 @@ pub unsafe fn _mm_maskz_mov_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Add packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_epi32&expand=100)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi32&expand=100)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -539,7 +539,7 @@ pub unsafe fn _mm512_add_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Add packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_epi32&expand=101)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi32&expand=101)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -550,7 +550,7 @@ pub unsafe fn _mm512_mask_add_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Add packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_epi32&expand=102)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi32&expand=102)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -562,7 +562,7 @@ pub unsafe fn _mm512_maskz_add_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Add packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_epi32&expand=98)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi32&expand=98)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -573,7 +573,7 @@ pub unsafe fn _mm256_mask_add_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Add packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_epi32&expand=99)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi32&expand=99)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -585,7 +585,7 @@ pub unsafe fn _mm256_maskz_add_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Add packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_epi32&expand=95)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi32&expand=95)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddd))]
@@ -596,7 +596,7 @@ pub unsafe fn _mm_mask_add_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Add packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_epi32&expand=96)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi32&expand=96)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddd))]
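A sketch of masked integer addition: maskz_add computes sums only in the selected lanes and zeroes the rest, a common way to add a subset of lanes without a branch. Assumes AVX512F; values are illustrative.

#[cfg(target_arch = "x86_64")]
fn maskz_add_epi32_demo() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let a = _mm512_set1_epi32(10);
        let b = _mm512_set1_epi32(5);
        let r = _mm512_maskz_add_epi32(0x0F0F, a, b);
        let lanes: [i32; 16] = core::mem::transmute(r);
        assert_eq!(lanes[0], 15); // selected lane: 10 + 5
        assert_eq!(lanes[4], 0);  // unselected lane: zeroed
    }
}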
@@ -608,7 +608,7 @@ pub unsafe fn _mm_maskz_add_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Add packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_epi64&expand=109)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_epi64&expand=109)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -618,7 +618,7 @@ pub unsafe fn _mm512_add_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Add packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_epi64&expand=110)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_epi64&expand=110)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -629,7 +629,7 @@ pub unsafe fn _mm512_mask_add_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Add packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_epi64&expand=111)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_epi64&expand=111)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -641,7 +641,7 @@ pub unsafe fn _mm512_maskz_add_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Add packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_epi64&expand=107)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_epi64&expand=107)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -652,7 +652,7 @@ pub unsafe fn _mm256_mask_add_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Add packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_epi64&expand=108)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_epi64&expand=108)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -664,7 +664,7 @@ pub unsafe fn _mm256_maskz_add_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Add packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_epi64&expand=104)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_epi64&expand=104)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddq))]
@@ -675,7 +675,7 @@ pub unsafe fn _mm_mask_add_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Add packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_epi64&expand=105)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_epi64&expand=105)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpaddq))]
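The packed 64-bit add wraps on overflow, like the scalar wrapping_add; a short sketch assuming AVX512F:

#[cfg(target_arch = "x86_64")]
fn add_epi64_demo() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use core::arch::x86_64::*;
        let a = _mm512_set1_epi64(i64::MAX);
        let b = _mm512_set1_epi64(1);
        let r = _mm512_add_epi64(a, b);
        let lanes: [i64; 8] = core::mem::transmute(r);
        assert!(lanes.iter().all(|&l| l == i64::MIN)); // two's-complement wraparound
    }
}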
@@ -687,7 +687,7 @@ pub unsafe fn _mm_maskz_add_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_ps&expand=139)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_ps&expand=139)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -697,7 +697,7 @@ pub unsafe fn _mm512_add_ps(a: __m512, b: __m512) -> __m512 {
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_ps&expand=140)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_ps&expand=140)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -708,7 +708,7 @@ pub unsafe fn _mm512_mask_add_ps(src: __m512, k: __mmask16, a: __m512, b: __m512
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_ps&expand=141)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_ps&expand=141)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -720,7 +720,7 @@ pub unsafe fn _mm512_maskz_add_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_ps&expand=137)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_ps&expand=137)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -731,7 +731,7 @@ pub unsafe fn _mm256_mask_add_ps(src: __m256, k: __mmask8, a: __m256, b: __m256)
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_ps&expand=138)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_ps&expand=138)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -743,7 +743,7 @@ pub unsafe fn _mm256_maskz_add_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 {
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_ps&expand=134)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_ps&expand=134)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -754,7 +754,7 @@ pub unsafe fn _mm_mask_add_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Add packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_ps&expand=135)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_ps&expand=135)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddps))]
@@ -766,7 +766,7 @@ pub unsafe fn _mm_maskz_add_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_pd&expand=127)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_pd&expand=127)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -776,7 +776,7 @@ pub unsafe fn _mm512_add_pd(a: __m512d, b: __m512d) -> __m512d {
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_pd&expand=128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_pd&expand=128)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -787,7 +787,7 @@ pub unsafe fn _mm512_mask_add_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_pd&expand=129)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_pd&expand=129)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -799,7 +799,7 @@ pub unsafe fn _mm512_maskz_add_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_pd&expand=125)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_add_pd&expand=125)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -810,7 +810,7 @@ pub unsafe fn _mm256_mask_add_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_pd&expand=126)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_add_pd&expand=126)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -822,7 +822,7 @@ pub unsafe fn _mm256_maskz_add_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_pd&expand=122)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_add_pd&expand=122)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -833,7 +833,7 @@ pub unsafe fn _mm_mask_add_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Add packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_pd&expand=123)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_pd&expand=123)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vaddpd))]
@@ -845,7 +845,7 @@ pub unsafe fn _mm_maskz_add_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_epi32&expand=5694)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi32&expand=5694)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -855,7 +855,7 @@ pub unsafe fn _mm512_sub_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_epi32&expand=5692)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi32&expand=5692)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -866,7 +866,7 @@ pub unsafe fn _mm512_mask_sub_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_epi32&expand=5693)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi32&expand=5693)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -878,7 +878,7 @@ pub unsafe fn _mm512_maskz_sub_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_epi32&expand=5689)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi32&expand=5689)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -889,7 +889,7 @@ pub unsafe fn _mm256_mask_sub_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_epi32&expand=5690)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi32&expand=5690)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -901,7 +901,7 @@ pub unsafe fn _mm256_maskz_sub_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_epi32&expand=5686)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi32&expand=5686)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -912,7 +912,7 @@ pub unsafe fn _mm_mask_sub_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_epi32&expand=5687)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi32&expand=5687)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubd))]
@@ -924,7 +924,7 @@ pub unsafe fn _mm_maskz_sub_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_epi64&expand=5703)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_epi64&expand=5703)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -934,7 +934,7 @@ pub unsafe fn _mm512_sub_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_epi64&expand=5701)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_epi64&expand=5701)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -945,7 +945,7 @@ pub unsafe fn _mm512_mask_sub_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_epi64&expand=5702)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_epi64&expand=5702)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -957,7 +957,7 @@ pub unsafe fn _mm512_maskz_sub_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_epi64&expand=5698)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_epi64&expand=5698)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -968,7 +968,7 @@ pub unsafe fn _mm256_mask_sub_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_epi64&expand=5699)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_epi64&expand=5699)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -980,7 +980,7 @@ pub unsafe fn _mm256_maskz_sub_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_epi64&expand=5695)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_epi64&expand=5695)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -991,7 +991,7 @@ pub unsafe fn _mm_mask_sub_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_epi64&expand=5696)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_epi64&expand=5696)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsubq))]
@@ -1003,7 +1003,7 @@ pub unsafe fn _mm_maskz_sub_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_ps&expand=5733)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_ps&expand=5733)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1013,7 +1013,7 @@ pub unsafe fn _mm512_sub_ps(a: __m512, b: __m512) -> __m512 {
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_ps&expand=5731)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_ps&expand=5731)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1024,7 +1024,7 @@ pub unsafe fn _mm512_mask_sub_ps(src: __m512, k: __mmask16, a: __m512, b: __m512
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_ps&expand=5732)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_ps&expand=5732)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1036,7 +1036,7 @@ pub unsafe fn _mm512_maskz_sub_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_ps&expand=5728)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_ps&expand=5728)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1047,7 +1047,7 @@ pub unsafe fn _mm256_mask_sub_ps(src: __m256, k: __mmask8, a: __m256, b: __m256)
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_ps&expand=5729)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_ps&expand=5729)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1059,7 +1059,7 @@ pub unsafe fn _mm256_maskz_sub_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 {
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_ps&expand=5725)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_ps&expand=5725)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1070,7 +1070,7 @@ pub unsafe fn _mm_mask_sub_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Subtract packed single-precision (32-bit) floating-point elements in b from packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_ps&expand=5726)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_ps&expand=5726)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubps))]
@@ -1082,7 +1082,7 @@ pub unsafe fn _mm_maskz_sub_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_pd&expand=5721)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_pd&expand=5721)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1092,7 +1092,7 @@ pub unsafe fn _mm512_sub_pd(a: __m512d, b: __m512d) -> __m512d {
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_pd&expand=5719)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_pd&expand=5719)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1103,7 +1103,7 @@ pub unsafe fn _mm512_mask_sub_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_pd&expand=5720)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_pd&expand=5720)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1115,7 +1115,7 @@ pub unsafe fn _mm512_maskz_sub_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_pd&expand=5716)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sub_pd&expand=5716)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1126,7 +1126,7 @@ pub unsafe fn _mm256_mask_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_pd&expand=5717)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sub_pd&expand=5717)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1138,7 +1138,7 @@ pub unsafe fn _mm256_maskz_sub_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_pd&expand=5713)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sub_pd&expand=5713)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1149,7 +1149,7 @@ pub unsafe fn _mm_mask_sub_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Subtract packed double-precision (64-bit) floating-point elements in b from packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_pd&expand=5714)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sub_pd&expand=5714)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsubpd))]
@@ -1161,7 +1161,7 @@ pub unsafe fn _mm_maskz_sub_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_epi32&expand=3907)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_epi32&expand=3907)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmuldq))]
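// Sketch (not from the patch) of the widening multiply documented above:
// the low 32 bits of each 64-bit lane are sign-extended and multiplied,
// yielding a full signed 64-bit product. Same AVX-512F assumptions as the
// earlier sketch.
#[cfg(target_arch = "x86_64")]
fn widening_mul_demo() {
    if is_x86_feature_detected!("avx512f") {
        use std::arch::x86_64::*;
        unsafe {
            // Each 64-bit lane holds -3 in its low 32 bits; the upper 32 bits
            // are ignored by the intrinsic.
            let a = _mm512_set1_epi64(-3i32 as u32 as i64);
            let b = _mm512_set1_epi64(5);
            // Every lane of r is the signed 64-bit product -15.
            let r = _mm512_mul_epi32(a, b);
            let _ = r;
        }
    }
}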
@@ -1171,7 +1171,7 @@ pub unsafe fn _mm512_mul_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_epi32&expand=3905)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_epi32&expand=3905)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -1182,7 +1182,7 @@ pub unsafe fn _mm512_mask_mul_epi32(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_epi32&expand=3906)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_epi32&expand=3906)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -1194,7 +1194,7 @@ pub unsafe fn _mm512_maskz_mul_epi32(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_epi32&expand=3902)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_epi32&expand=3902)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -1205,7 +1205,7 @@ pub unsafe fn _mm256_mask_mul_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_epi32&expand=3903)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_epi32&expand=3903)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -1217,7 +1217,7 @@ pub unsafe fn _mm256_maskz_mul_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_epi32&expand=3899)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_epi32&expand=3899)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -1228,7 +1228,7 @@ pub unsafe fn _mm_mask_mul_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Multiply the low signed 32-bit integers from each packed 64-bit element in a and b, and store the signed 64-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_epi32&expand=3900)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_epi32&expand=3900)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuldq))]
@@ -1240,7 +1240,7 @@ pub unsafe fn _mm_maskz_mul_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mullo_epi&expand=4005)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mullo_epi32&expand=4005)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmulld))]
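// Sketch (not from the patch) contrasting mullo with _mm512_mul_epi32 above:
// mullo multiplies all sixteen 32-bit lanes and keeps only the low 32 bits
// of each product, so it wraps on overflow.
#[cfg(target_arch = "x86_64")]
fn mullo_demo() {
    if is_x86_feature_detected!("avx512f") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm512_set1_epi32(0x4000_0000); // 2^30
            let b = _mm512_set1_epi32(4);
            // 2^30 * 4 = 2^32, which truncates to 0 in every 32-bit lane.
            let r = _mm512_mullo_epi32(a, b);
            let _ = r;
        }
    }
}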
@@ -1250,7 +1250,7 @@ pub unsafe fn _mm512_mullo_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mullo_epi32&expand=4003)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mullo_epi32&expand=4003)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -1266,7 +1266,7 @@ pub unsafe fn _mm512_mask_mullo_epi32(
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mullo_epi32&expand=4004)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mullo_epi32&expand=4004)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -1278,7 +1278,7 @@ pub unsafe fn _mm512_maskz_mullo_epi32(k: __mmask16, a: __m512i, b: __m512i) ->
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mullo_epi32&expand=4000)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mullo_epi32&expand=4000)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -1294,7 +1294,7 @@ pub unsafe fn _mm256_mask_mullo_epi32(
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mullo_epi32&expand=4001)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mullo_epi32&expand=4001)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -1306,7 +1306,7 @@ pub unsafe fn _mm256_maskz_mullo_epi32(k: __mmask8, a: __m256i, b: __m256i) -> _
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mullo_epi32&expand=3997)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mullo_epi32&expand=3997)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -1317,7 +1317,7 @@ pub unsafe fn _mm_mask_mullo_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m
/// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mullo_epi32&expand=3998)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mullo_epi32&expand=3998)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmulld))]
@@ -1329,7 +1329,7 @@ pub unsafe fn _mm_maskz_mullo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m1
/// Multiplies elements in packed 64-bit integer vectors a and b together, storing the lower 64 bits of the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mullox_epi64&expand=4017)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mullox_epi64&expand=4017)
///
/// This intrinsic generates a sequence of instructions, which may perform worse than a native instruction. Consider the performance impact of this intrinsic.
#[inline]
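// Sketch (not from the patch): as the doc comment above notes, mullox has no
// single AVX-512F instruction and expands to a sequence, but its call shape
// matches the other multiplies. It returns the low 64 bits of each 128-bit
// product. Same availability assumptions as the earlier sketches.
#[cfg(target_arch = "x86_64")]
fn mullox_demo() {
    if is_x86_feature_detected!("avx512f") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm512_set1_epi64(1 << 40);
            let b = _mm512_set1_epi64(1 << 30);
            // 2^40 * 2^30 = 2^70; the low 64 bits are 2^6 = 64 in every lane.
            let r = _mm512_mullox_epi64(a, b);
            let _ = r;
        }
    }
}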
@@ -1340,7 +1340,7 @@ pub unsafe fn _mm512_mullox_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Multiplies elements in packed 64-bit integer vectors a and b together, storing the lower 64 bits of the result in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_mullox&expand=4016)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mullox_epi64&expand=4016)
///
/// This intrinsic generates a sequence of instructions, which may perform worse than a native instruction. Consider the performance impact of this intrinsic.
#[inline]
@@ -1357,7 +1357,7 @@ pub unsafe fn _mm512_mask_mullox_epi64(
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mul_epu32&expand=3916)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_epu32&expand=3916)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1367,7 +1367,7 @@ pub unsafe fn _mm512_mul_epu32(a: __m512i, b: __m512i) -> __m512i {
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_mul_epu32&expand=3914)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_epu32&expand=3914)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1378,7 +1378,7 @@ pub unsafe fn _mm512_mask_mul_epu32(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_mul_epu32&expand=3915)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_epu32&expand=3915)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1390,7 +1390,7 @@ pub unsafe fn _mm512_maskz_mul_epu32(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_epu32&expand=3911)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_epu32&expand=3911)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1401,7 +1401,7 @@ pub unsafe fn _mm256_mask_mul_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_epu32&expand=3912)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_epu32&expand=3912)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1413,7 +1413,7 @@ pub unsafe fn _mm256_maskz_mul_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_epu32&expand=3908)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_epu32&expand=3908)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1424,7 +1424,7 @@ pub unsafe fn _mm_mask_mul_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Multiply the low unsigned 32-bit integers from each packed 64-bit element in a and b, and store the unsigned 64-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_epu32&expand=3909)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_epu32&expand=3909)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmuludq))]
@@ -1436,7 +1436,7 @@ pub unsafe fn _mm_maskz_mul_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_ps&expand=3934)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_ps&expand=3934)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1446,7 +1446,7 @@ pub unsafe fn _mm512_mul_ps(a: __m512, b: __m512) -> __m512 {
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_ps&expand=3932)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_ps&expand=3932)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1457,7 +1457,7 @@ pub unsafe fn _mm512_mask_mul_ps(src: __m512, k: __mmask16, a: __m512, b: __m512
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_ps&expand=3933)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_ps&expand=3933)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1469,7 +1469,7 @@ pub unsafe fn _mm512_maskz_mul_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_ps&expand=3929)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_ps&expand=3929)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1480,7 +1480,7 @@ pub unsafe fn _mm256_mask_mul_ps(src: __m256, k: __mmask8, a: __m256, b: __m256)
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_ps&expand=3930)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_ps&expand=3930)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1492,7 +1492,7 @@ pub unsafe fn _mm256_maskz_mul_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 {
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_ps&expand=3926)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_ps&expand=3926)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1503,7 +1503,7 @@ pub unsafe fn _mm_mask_mul_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_ps&expand=3927)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_ps&expand=3927)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulps))]
@@ -1515,7 +1515,7 @@ pub unsafe fn _mm_maskz_mul_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_pd&expand=3925)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_pd&expand=3925)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -1525,7 +1525,7 @@ pub unsafe fn _mm512_mul_pd(a: __m512d, b: __m512d) -> __m512d {
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_pd&expand=3923)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_pd&expand=3923)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -1536,7 +1536,7 @@ pub unsafe fn _mm512_mask_mul_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_pd&expand=3924)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_pd&expand=3924)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -1548,7 +1548,7 @@ pub unsafe fn _mm512_maskz_mul_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_pd&expand=3920)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_mul_pd&expand=3920)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -1559,7 +1559,7 @@ pub unsafe fn _mm256_mask_mul_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_pd&expand=3921)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_mul_pd&expand=3921)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulpd))]
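// Sketch (not from the patch): the _mm256_/_mm_ variants above are gated on
// both "avx512f" and "avx512vl", so a runtime check needs to test both
// features before calling them.
#[cfg(target_arch = "x86_64")]
fn vl_mul_demo() {
    if is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm256_set1_pd(1.5);
            let b = _mm256_set1_pd(2.0);
            // Zeromask 0b0101: lanes 0 and 2 hold 3.0, lanes 1 and 3 are 0.0.
            let r = _mm256_maskz_mul_pd(0b0101, a, b);
            let _ = r;
        }
    }
}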
@@ -1571,7 +1571,7 @@ pub unsafe fn _mm256_maskz_mul_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_pd&expand=3917)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_mul_pd&expand=3917)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -1582,7 +1582,7 @@ pub unsafe fn _mm_mask_mul_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_pd&expand=3918)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_mul_pd&expand=3918)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmulpd))]
@@ -1594,7 +1594,7 @@ pub unsafe fn _mm_maskz_mul_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_ps&expand=2162)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_ps&expand=2162)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivps))]
@@ -1604,7 +1604,7 @@ pub unsafe fn _mm512_div_ps(a: __m512, b: __m512) -> __m512 {
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_ps&expand=2163)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_ps&expand=2163)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivps))]
@@ -1615,7 +1615,7 @@ pub unsafe fn _mm512_mask_div_ps(src: __m512, k: __mmask16, a: __m512, b: __m512
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_ps&expand=2164)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_ps&expand=2164)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivps))]
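// Sketch (not from the patch) of one practical use of the zeromask form
// documented above: mask off lanes whose divisor is zero, since AVX-512
// masking skips the computation for unselected elements. Same availability
// assumptions as the earlier sketches.
#[cfg(target_arch = "x86_64")]
fn guarded_div_demo() {
    if is_x86_feature_detected!("avx512f") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm512_set1_ps(8.0);
            let b = _mm512_setr_ps(
                2.0, 0.0, 4.0, 0.0, 2.0, 0.0, 4.0, 0.0,
                2.0, 0.0, 4.0, 0.0, 2.0, 0.0, 4.0, 0.0,
            );
            // Build a mask of the lanes with a nonzero divisor...
            let nonzero = _mm512_cmpneq_ps_mask(b, _mm512_setzero_ps());
            // ...then divide only those lanes; the rest are zeroed out.
            let r = _mm512_maskz_div_ps(nonzero, a, b);
            let _ = r;
        }
    }
}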
@@ -1627,7 +1627,7 @@ pub unsafe fn _mm512_maskz_div_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_div_ps&expand=2160)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_div_ps&expand=2160)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivps))]
@@ -1638,7 +1638,7 @@ pub unsafe fn _mm256_mask_div_ps(src: __m256, k: __mmask8, a: __m256, b: __m256)
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_div_ps&expand=2161)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_div_ps&expand=2161)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivps))]
@@ -1650,7 +1650,7 @@ pub unsafe fn _mm256_maskz_div_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 {
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_ps&expand=2157)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_div_ps&expand=2157)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivps))]
@@ -1661,7 +1661,7 @@ pub unsafe fn _mm_mask_div_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Divide packed single-precision (32-bit) floating-point elements in a by packed elements in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_ps&expand=2158)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_div_ps&expand=2158)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivps))]
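
All of the mask/maskz pairs touched in this patch share the two masking modes described in the doc comments above. A minimal scalar sketch of what the two forms compute (hypothetical helper names; 16 `f32` lanes standing in for `__m512`, a `u16` standing in for `__mmask16`):

```rust
/// Writemask form: lanes whose mask bit is set get `a / b`; the rest
/// are copied through unchanged from `src`.
fn mask_div_ps_model(src: [f32; 16], k: u16, a: [f32; 16], b: [f32; 16]) -> [f32; 16] {
    core::array::from_fn(|i| if (k >> i) & 1 != 0 { a[i] / b[i] } else { src[i] })
}

/// Zeromask form: unselected lanes become 0.0 instead of coming from a
/// source vector.
fn maskz_div_ps_model(k: u16, a: [f32; 16], b: [f32; 16]) -> [f32; 16] {
    core::array::from_fn(|i| if (k >> i) & 1 != 0 { a[i] / b[i] } else { 0.0 })
}
```
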
@@ -1673,7 +1673,7 @@ pub unsafe fn _mm_maskz_div_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_div_pd&expand=2153)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_pd&expand=2153)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1683,7 +1683,7 @@ pub unsafe fn _mm512_div_pd(a: __m512d, b: __m512d) -> __m512d {
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_pd&expand=2154)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_pd&expand=2154)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1694,7 +1694,7 @@ pub unsafe fn _mm512_mask_div_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_pd&expand=2155)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_pd&expand=2155)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1706,7 +1706,7 @@ pub unsafe fn _mm512_maskz_div_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_div_pd&expand=2151)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_div_pd&expand=2151)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1717,7 +1717,7 @@ pub unsafe fn _mm256_mask_div_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_div_pd&expand=2152)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_div_pd&expand=2152)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1729,7 +1729,7 @@ pub unsafe fn _mm256_maskz_div_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_pd&expand=2148)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_div_pd&expand=2148)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1740,7 +1740,7 @@ pub unsafe fn _mm_mask_div_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Divide packed double-precision (64-bit) floating-point elements in a by packed elements in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_pd&expand=2149)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_div_pd&expand=2149)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vdivpd))]
@@ -1752,7 +1752,7 @@ pub unsafe fn _mm_maskz_div_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epi32&expand=3582)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi32&expand=3582)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1762,7 +1762,7 @@ pub unsafe fn _mm512_max_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epi32&expand=3580)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi32&expand=3580)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1773,7 +1773,7 @@ pub unsafe fn _mm512_mask_max_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epi32&expand=3581)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi32&expand=3581)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1785,7 +1785,7 @@ pub unsafe fn _mm512_maskz_max_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epi32&expand=3577)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi32&expand=3577)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1796,7 +1796,7 @@ pub unsafe fn _mm256_mask_max_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epi32&expand=3578)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi32&expand=3578)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1808,7 +1808,7 @@ pub unsafe fn _mm256_maskz_max_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epi32&expand=3574)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi32&expand=3574)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
@@ -1819,7 +1819,7 @@ pub unsafe fn _mm_mask_max_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed signed 32-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epi32&expand=3575)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi32&expand=3575)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsd))]
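
For reference, a hedged usage sketch of the writemask form with runtime feature detection. This assumes a toolchain where the AVX-512 intrinsics in `core::arch` are available to call (they have long been nightly feature-gated):

```rust
#[cfg(target_arch = "x86_64")]
fn demo_mask_max_epi32() {
    use core::arch::x86_64::*;
    if is_x86_feature_detected!("avx512f") {
        unsafe {
            let a = _mm512_set1_epi32(3);
            let b = _mm512_set1_epi32(7);
            let src = _mm512_set1_epi32(-1);
            // Only the low four lanes are selected; the rest copy `src`.
            let r = _mm512_mask_max_epi32(src, 0b0000_0000_0000_1111, a, b);
            let mut out = [0i32; 16];
            _mm512_storeu_epi32(out.as_mut_ptr(), r);
            assert_eq!(&out[..4], &[7; 4]);
            assert_eq!(&out[4..], &[-1; 12]);
        }
    }
}
```
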
@@ -1831,7 +1831,7 @@ pub unsafe fn _mm_maskz_max_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epi64&expand=3591)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epi64&expand=3591)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1841,7 +1841,7 @@ pub unsafe fn _mm512_max_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epi64&expand=3589)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epi64&expand=3589)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1852,7 +1852,7 @@ pub unsafe fn _mm512_mask_max_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epi64&expand=3590)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epi64&expand=3590)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1864,7 +1864,7 @@ pub unsafe fn _mm512_maskz_max_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epi64&expand=3588)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epi64&expand=3588)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1874,7 +1874,7 @@ pub unsafe fn _mm256_max_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epi64&expand=3586)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epi64&expand=3586)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1885,7 +1885,7 @@ pub unsafe fn _mm256_mask_max_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epi64&expand=3587)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epi64&expand=3587)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1897,7 +1897,7 @@ pub unsafe fn _mm256_maskz_max_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi64&expand=3585)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi64&expand=3585)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1907,7 +1907,7 @@ pub unsafe fn _mm_max_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epi64&expand=3583)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epi64&expand=3583)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1918,7 +1918,7 @@ pub unsafe fn _mm_mask_max_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed signed 64-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epi64&expand=3584)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epi64&expand=3584)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxsq))]
@@ -1930,7 +1930,7 @@ pub unsafe fn _mm_maskz_max_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_ps&expand=3655)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_ps&expand=3655)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -1944,7 +1944,7 @@ pub unsafe fn _mm512_max_ps(a: __m512, b: __m512) -> __m512 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_ps&expand=3653)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_ps&expand=3653)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -1955,7 +1955,7 @@ pub unsafe fn _mm512_mask_max_ps(src: __m512, k: __mmask16, a: __m512, b: __m512
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_ps&expand=3654)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_ps&expand=3654)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -1967,7 +1967,7 @@ pub unsafe fn _mm512_maskz_max_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_ps&expand=3650)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_ps&expand=3650)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -1978,7 +1978,7 @@ pub unsafe fn _mm256_mask_max_ps(src: __m256, k: __mmask8, a: __m256, b: __m256)
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_ps&expand=3651)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_ps&expand=3651)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -1990,7 +1990,7 @@ pub unsafe fn _mm256_maskz_max_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_ps&expand=3647)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_ps&expand=3647)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxps))]
@@ -2001,7 +2001,7 @@ pub unsafe fn _mm_mask_max_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_ps&expand=3648)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_ps&expand=3648)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxps))]
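
The 256-bit and 128-bit masked forms carry `target_feature(enable = "avx512f,avx512vl")` because AVX512VL is what provides the EVEX-encoded instructions at the narrower vector widths. A hypothetical wrapper, callable only after both features have been verified:

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m256, __mmask8, _mm256_mask_max_ps};

/// Masked 256-bit max; the caller must have checked that both
/// "avx512f" and "avx512vl" are supported before calling this.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn max_ps_keep_src(src: __m256, k: __mmask8, a: __m256, b: __m256) -> __m256 {
    _mm256_mask_max_ps(src, k, a, b)
}
```
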
@@ -2013,7 +2013,7 @@ pub unsafe fn _mm_maskz_max_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_pd&expand=3645)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_pd&expand=3645)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2023,7 +2023,7 @@ pub unsafe fn _mm512_max_pd(a: __m512d, b: __m512d) -> __m512d {
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_pd&expand=3643)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_pd&expand=3643)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2034,7 +2034,7 @@ pub unsafe fn _mm512_mask_max_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_pd&expand=3644)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_pd&expand=3644)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2046,7 +2046,7 @@ pub unsafe fn _mm512_maskz_max_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_pd&expand=3640)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_pd&expand=3640)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2057,7 +2057,7 @@ pub unsafe fn _mm256_mask_max_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_pd&expand=3641)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_pd&expand=3641)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2069,7 +2069,7 @@ pub unsafe fn _mm256_maskz_max_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_pd&expand=3637)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_pd&expand=3637)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2080,7 +2080,7 @@ pub unsafe fn _mm_mask_max_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_pd&expand=3638)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_pd&expand=3638)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmaxpd))]
@@ -2092,7 +2092,7 @@ pub unsafe fn _mm_maskz_max_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epu32&expand=3618)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu32&expand=3618)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2102,7 +2102,7 @@ pub unsafe fn _mm512_max_epu32(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epu32&expand=3616)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu32&expand=3616)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2113,7 +2113,7 @@ pub unsafe fn _mm512_mask_max_epu32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epu32&expand=3617)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu32&expand=3617)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2125,7 +2125,7 @@ pub unsafe fn _mm512_maskz_max_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epu32&expand=3613)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu32&expand=3613)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2136,7 +2136,7 @@ pub unsafe fn _mm256_mask_max_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epu32&expand=3614)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu32&expand=3614)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2148,7 +2148,7 @@ pub unsafe fn _mm256_maskz_max_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epu32&expand=3610)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu32&expand=3610)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2159,7 +2159,7 @@ pub unsafe fn _mm_mask_max_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed unsigned 32-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epu32&expand=3611)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu32&expand=3611)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxud))]
@@ -2171,7 +2171,7 @@ pub unsafe fn _mm_maskz_max_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_epu64&expand=3627)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_epu64&expand=3627)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2181,7 +2181,7 @@ pub unsafe fn _mm512_max_epu64(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_epu64&expand=3625)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_epu64&expand=3625)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2192,7 +2192,7 @@ pub unsafe fn _mm512_mask_max_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_epu&expand=3626)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_epu64&expand=3626)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2204,7 +2204,7 @@ pub unsafe fn _mm512_maskz_max_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_epu64&expand=3624)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_max_epu64&expand=3624)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2214,7 +2214,7 @@ pub unsafe fn _mm256_max_epu64(a: __m256i, b: __m256i) -> __m256i {
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_epu64&expand=3622)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_max_epu64&expand=3622)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2225,7 +2225,7 @@ pub unsafe fn _mm256_mask_max_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_epu64&expand=3623)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_max_epu64&expand=3623)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2237,7 +2237,7 @@ pub unsafe fn _mm256_maskz_max_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu64&expand=3621)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu64&expand=3621)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2247,7 +2247,7 @@ pub unsafe fn _mm_max_epu64(a: __m128i, b: __m128i) -> __m128i {
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_epu64&expand=3619)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_max_epu64&expand=3619)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
@@ -2258,7 +2258,7 @@ pub unsafe fn _mm_mask_max_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed unsigned 64-bit integers in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_epu64&expand=3620)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_max_epu64&expand=3620)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmaxuq))]
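
The epi64/epu64 pairs differ only in whether lanes are compared as signed or unsigned; a one-lane scalar model makes the distinction concrete (packed 64-bit integer max has no SSE/AVX2 predecessor, which is why even the 128-bit forms here require AVX-512 features):

```rust
// One lane of vpmaxsq vs. vpmaxuq: the same 64 bits order differently
// depending on whether they are read as i64 or u64.
fn max_epi64_lane(a: i64, b: i64) -> i64 {
    a.max(b)
}

fn max_epu64_lane(a: u64, b: u64) -> u64 {
    a.max(b)
}

// With the bit patterns 0xFFFF_FFFF_FFFF_FFFF and 1:
//   signed:   max(-1, 1)       == 1
//   unsigned: max(u64::MAX, 1) == u64::MAX
```
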
@@ -2270,7 +2270,7 @@ pub unsafe fn _mm_maskz_max_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epi32&expand=3696)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi32&expand=3696)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2280,7 +2280,7 @@ pub unsafe fn _mm512_min_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epi32&expand=3694)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi32&expand=3694)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2291,7 +2291,7 @@ pub unsafe fn _mm512_mask_min_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epi32&expand=3695)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi32&expand=3695)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2303,7 +2303,7 @@ pub unsafe fn _mm512_maskz_min_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epi32&expand=3691)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi32&expand=3691)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2314,7 +2314,7 @@ pub unsafe fn _mm256_mask_min_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epi32&expand=3692)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi32&expand=3692)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2326,7 +2326,7 @@ pub unsafe fn _mm256_maskz_min_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epi32&expand=3688)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epi32&expand=3688)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2337,7 +2337,7 @@ pub unsafe fn _mm_mask_min_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed signed 32-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epi32&expand=3689)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epi32&expand=3689)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsd))]
@@ -2349,7 +2349,7 @@ pub unsafe fn _mm_maskz_min_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed signed 64-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epi64&expand=3705)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epi64&expand=3705)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminsq))]
@@ -2359,7 +2359,7 @@ pub unsafe fn _mm512_min_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed signed 64-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epi64&expand=3703)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epi64&expand=3703)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminsq))]
@@ -2370,7 +2370,7 @@ pub unsafe fn _mm512_mask_min_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Compare packed signed 64-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_min_epi64&expand=3704)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epi64&expand=3704)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminsq))]
@@ -2382,7 +2382,7 @@ pub unsafe fn _mm512_maskz_min_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Compare packed signed 64-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epi64&expand=3702)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epi64&expand=3702)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsq))]
@@ -2392,7 +2392,7 @@ pub unsafe fn _mm256_min_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Compare packed signed 64-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epi64&expand=3700)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epi64&expand=3700)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsq))]
@@ -2403,7 +2403,7 @@ pub unsafe fn _mm256_mask_min_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed signed 64-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epi64&expand=3701)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epi64&expand=3701)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminsq))]
@@ -2415,7 +2415,7 @@ pub unsafe fn _mm256_maskz_min_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_ps&expand=3769)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_ps&expand=3769)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2429,7 +2429,7 @@ pub unsafe fn _mm512_min_ps(a: __m512, b: __m512) -> __m512 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_ps&expand=3767)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_ps&expand=3767)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2440,7 +2440,7 @@ pub unsafe fn _mm512_mask_min_ps(src: __m512, k: __mmask16, a: __m512, b: __m512
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_ps&expand=3768)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_ps&expand=3768)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2452,7 +2452,7 @@ pub unsafe fn _mm512_maskz_min_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_ps&expand=3764)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_ps&expand=3764)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2463,7 +2463,7 @@ pub unsafe fn _mm256_mask_min_ps(src: __m256, k: __mmask8, a: __m256, b: __m256)
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_ps&expand=3765)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_ps&expand=3765)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2475,7 +2475,7 @@ pub unsafe fn _mm256_maskz_min_ps(k: __mmask8, a: __m256, b: __m256) -> __m256 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_ps&expand=3761)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_ps&expand=3761)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2486,7 +2486,7 @@ pub unsafe fn _mm_mask_min_ps(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_ps&expand=3762)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_ps&expand=3762)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminps))]
@@ -2500,7 +2500,7 @@ pub unsafe fn _mm_maskz_min_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_min_pd&expand=3759)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_pd&expand=3759)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2510,7 +2510,7 @@ pub unsafe fn _mm512_min_pd(a: __m512d, b: __m512d) -> __m512d {
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_min_pd&expand=3757)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_pd&expand=3757)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2521,7 +2521,7 @@ pub unsafe fn _mm512_mask_min_pd(src: __m512d, k: __mmask8, a: __m512d, b: __m51
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_min_pd&expand=3758)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_pd&expand=3758)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2533,7 +2533,7 @@ pub unsafe fn _mm512_maskz_min_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m512
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_pd&expand=3754)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_pd&expand=3754)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2544,7 +2544,7 @@ pub unsafe fn _mm256_mask_min_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m25
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_pd&expand=3755)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_pd&expand=3755)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2556,7 +2556,7 @@ pub unsafe fn _mm256_maskz_min_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m256
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_pd&expand=3751)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_pd&expand=3751)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2567,7 +2567,7 @@ pub unsafe fn _mm_mask_min_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_pd&expand=3752)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_pd&expand=3752)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vminpd))]
@@ -2579,7 +2579,7 @@ pub unsafe fn _mm_maskz_min_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epu32&expand=3732)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu32&expand=3732)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2589,7 +2589,7 @@ pub unsafe fn _mm512_min_epu32(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epu32&expand=3730)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu32&expand=3730)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2600,7 +2600,7 @@ pub unsafe fn _mm512_mask_min_epu32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epu32&expand=3731)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu32&expand=3731)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2612,7 +2612,7 @@ pub unsafe fn _mm512_maskz_min_epu32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epu32&expand=3727)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu32&expand=3727)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2623,7 +2623,7 @@ pub unsafe fn _mm256_mask_min_epu32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epu32&expand=3728)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu32&expand=3728)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2635,7 +2635,7 @@ pub unsafe fn _mm256_maskz_min_epu32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epu32&expand=3724)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu32&expand=3724)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminud))]
@@ -2646,7 +2646,7 @@ pub unsafe fn _mm_mask_min_epu32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed unsigned 32-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epu32&expand=3725)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu32&expand=3725)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminud))]
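
A common pattern with the zeromask forms is to derive the mask from a comparison at the same lane width. A hypothetical sketch combining an unsigned compare with `_mm512_maskz_min_epu32` (the caller must have verified AVX512F support):

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

/// Keep `min(a, b)` only in lanes where `a` is below `limit` when both
/// are read as unsigned 32-bit integers; the maskz form zeroes the rest.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn min_where_below(a: __m512i, b: __m512i, limit: __m512i) -> __m512i {
    let k = _mm512_cmplt_epu32_mask(a, limit);
    _mm512_maskz_min_epu32(k, a, b)
}
```
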
@@ -2658,7 +2658,7 @@ pub unsafe fn _mm_maskz_min_epu32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_epu64&expand=3741)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_epu64&expand=3741)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2668,7 +2668,7 @@ pub unsafe fn _mm512_min_epu64(a: __m512i, b: __m512i) -> __m512i {
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_epu64&expand=3739)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_epu64&expand=3739)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2679,7 +2679,7 @@ pub unsafe fn _mm512_mask_min_epu64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_epu64&expand=3740)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_epu64&expand=3740)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2691,7 +2691,7 @@ pub unsafe fn _mm512_maskz_min_epu64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_epu64&expand=3738)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_min_epu64&expand=3738)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2701,7 +2701,7 @@ pub unsafe fn _mm256_min_epu64(a: __m256i, b: __m256i) -> __m256i {
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_epu64&expand=3736)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_min_epu64&expand=3736)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2712,7 +2712,7 @@ pub unsafe fn _mm256_mask_min_epu64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_epu64&expand=3737)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_min_epu64&expand=3737)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2724,7 +2724,7 @@ pub unsafe fn _mm256_maskz_min_epu64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu64&expand=3735)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu64&expand=3735)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2734,7 +2734,7 @@ pub unsafe fn _mm_min_epu64(a: __m128i, b: __m128i) -> __m128i {
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_epu64&expand=3733)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_min_epu64&expand=3733)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuq))]
@@ -2745,7 +2745,7 @@ pub unsafe fn _mm_mask_min_epu64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compare packed unsigned 64-bit integers in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_epu64&expand=3734)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_min_epu64&expand=3734)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpminuq))]
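
Why a dedicated unsigned 64-bit min exists at all: for values with the top bit set, signed and unsigned comparison disagree. A small sketch, runnable under the same runtime-detection harness as the min_epu32 example above:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn min_epu64_demo() {
    use std::arch::x86_64::*;
    // u64::MAX reinterpreted as i64 is -1; an unsigned min must still pick 1.
    let a = _mm512_set1_epi64(u64::MAX as i64);
    let b = _mm512_set1_epi64(1);
    let r: [u64; 8] = core::mem::transmute(_mm512_min_epu64(a, b));
    assert_eq!(r, [1u64; 8]);
}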
@@ -2757,7 +2757,7 @@ pub unsafe fn _mm_maskz_min_epu64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_ps&expand=5371)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_ps&expand=5371)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -2767,7 +2767,7 @@ pub unsafe fn _mm512_sqrt_ps(a: __m512) -> __m512 {
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_ps&expand=5369)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_ps&expand=5369)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -2778,7 +2778,7 @@ pub unsafe fn _mm512_mask_sqrt_ps(src: __m512, k: __mmask16, a: __m512) -> __m51
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_ps&expand=5370)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_ps&expand=5370)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -2790,7 +2790,7 @@ pub unsafe fn _mm512_maskz_sqrt_ps(k: __mmask16, a: __m512) -> __m512 {
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_ps&expand=5366)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sqrt_ps&expand=5366)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -2801,7 +2801,7 @@ pub unsafe fn _mm256_mask_sqrt_ps(src: __m256, k: __mmask8, a: __m256) -> __m256
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sqrt_ps&expand=5367)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sqrt_ps&expand=5367)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -2813,7 +2813,7 @@ pub unsafe fn _mm256_maskz_sqrt_ps(k: __mmask8, a: __m256) -> __m256 {
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_ps&expand=5363)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sqrt_ps&expand=5363)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtps))]
@@ -2824,7 +2824,7 @@ pub unsafe fn _mm_mask_sqrt_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 {
/// Compute the square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_ps&expand=5364)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sqrt_ps&expand=5364)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtps))]
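
One practical use of the zeromask forms is suppressing lanes whose input would otherwise produce a NaN. A sketch (same harness as above; the 512-bit form needs only AVX512F):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn sqrt_ps_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_setr_ps(
        4.0, 9.0, -1.0, 16.0, 25.0, 36.0, 49.0, 64.0,
        81.0, 100.0, 121.0, 144.0, 169.0, 196.0, 225.0, 256.0,
    );
    let k: __mmask16 = !(1u16 << 2); // knock out lane 2, whose input is negative
    let r: [f32; 16] = core::mem::transmute(_mm512_maskz_sqrt_ps(k, a));
    assert_eq!(r[0], 2.0);
    assert_eq!(r[2], 0.0); // masked-off lane is zeroed; no NaN from sqrt(-1.0)
    assert_eq!(r[15], 16.0);
}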
@@ -2836,7 +2836,7 @@ pub unsafe fn _mm_maskz_sqrt_ps(k: __mmask8, a: __m128) -> __m128 {
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_pd&expand=5362)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_pd&expand=5362)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -2846,7 +2846,7 @@ pub unsafe fn _mm512_sqrt_pd(a: __m512d) -> __m512d {
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_pd&expand=5360)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_pd&expand=5360)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -2857,7 +2857,7 @@ pub unsafe fn _mm512_mask_sqrt_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m5
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_pd&expand=5361)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_pd&expand=5361)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -2869,7 +2869,7 @@ pub unsafe fn _mm512_maskz_sqrt_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_pd&expand=5357)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sqrt_pd&expand=5357)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -2880,7 +2880,7 @@ pub unsafe fn _mm256_mask_sqrt_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m2
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sqrt_pd&expand=5358)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sqrt_pd&expand=5358)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -2892,7 +2892,7 @@ pub unsafe fn _mm256_maskz_sqrt_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_pd&expand=5354)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sqrt_pd&expand=5354)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
@@ -2903,7 +2903,7 @@ pub unsafe fn _mm_mask_sqrt_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128d
/// Compute the square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_pd&expand=5355)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sqrt_pd&expand=5355)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
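
The same idea with the writemask form at 128 bits, where inactive lanes fall through from src rather than becoming zero (sketch; assumes AVX512F plus AVX512VL):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn sqrt_pd_demo() {
    use std::arch::x86_64::*;
    let src = _mm_set1_pd(-7.0); // sentinel that survives in inactive lanes
    let a = _mm_setr_pd(2.25, 100.0);
    let r: [f64; 2] = core::mem::transmute(_mm_mask_sqrt_pd(src, 0b10, a));
    assert_eq!(r, [-7.0, 10.0]); // lane 0 inactive (copied from src), lane 1 = sqrt(100.0)
}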
@@ -2915,7 +2915,7 @@ pub unsafe fn _mm_maskz_sqrt_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_ps&expand=2557)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_ps&expand=2557)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2925,7 +2925,7 @@ pub unsafe fn _mm512_fmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 {
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_ps&expand=2558)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_ps&expand=2558)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2936,7 +2936,7 @@ pub unsafe fn _mm512_mask_fmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m512
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_ps&expand=2560)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_ps&expand=2560)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2948,7 +2948,7 @@ pub unsafe fn _mm512_maskz_fmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m51
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_ps&expand=2559)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_ps&expand=2559)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2959,7 +2959,7 @@ pub unsafe fn _mm512_mask3_fmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask1
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_ps&expand=2554)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmadd_ps&expand=2554)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2970,7 +2970,7 @@ pub unsafe fn _mm256_mask_fmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256)
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_ps&expand=2556)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmadd_ps&expand=2556)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2982,7 +2982,7 @@ pub unsafe fn _mm256_maskz_fmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m256
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_ps&expand=2555)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmadd_ps&expand=2555)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -2993,7 +2993,7 @@ pub unsafe fn _mm256_mask3_fmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask8
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_ps&expand=2550)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmadd_ps&expand=2550)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -3004,7 +3004,7 @@ pub unsafe fn _mm_mask_fmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) ->
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_ps&expand=2552)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmadd_ps&expand=2552)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -3016,7 +3016,7 @@ pub unsafe fn _mm_maskz_fmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_ps&expand=2551)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmadd_ps&expand=2551)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
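
The mask, maskz, and mask3 forms of fmadd compute the same a*b + c in active lanes and differ only in where inactive lanes come from: a, zero, or c respectively, exactly as the doc comments above state. A sketch under the usual harness:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fmadd_ps_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_ps(2.0);
    let b = _mm512_set1_ps(3.0);
    let c = _mm512_set1_ps(10.0);
    let k: __mmask16 = 0x00ff; // low eight lanes active
    let mask:  [f32; 16] = core::mem::transmute(_mm512_mask_fmadd_ps(a, k, b, c));
    let maskz: [f32; 16] = core::mem::transmute(_mm512_maskz_fmadd_ps(k, a, b, c));
    let mask3: [f32; 16] = core::mem::transmute(_mm512_mask3_fmadd_ps(a, b, c, k));
    assert_eq!(mask[0], 16.0);   // active lane: 2.0 * 3.0 + 10.0
    assert_eq!(mask[15], 2.0);   // inactive lane copied from a
    assert_eq!(maskz[15], 0.0);  // inactive lane zeroed
    assert_eq!(mask3[15], 10.0); // inactive lane copied from c
}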
@@ -3027,7 +3027,7 @@ pub unsafe fn _mm_mask3_fmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_pd&expand=2545)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_pd&expand=2545)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3037,7 +3037,7 @@ pub unsafe fn _mm512_fmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_pd&expand=2546)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_pd&expand=2546)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3048,7 +3048,7 @@ pub unsafe fn _mm512_mask_fmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m51
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_pd&expand=2548)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_pd&expand=2548)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3060,7 +3060,7 @@ pub unsafe fn _mm512_maskz_fmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m5
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_pd&expand=2547)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_pd&expand=2547)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3071,7 +3071,7 @@ pub unsafe fn _mm512_mask3_fmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mma
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_pd&expand=2542)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmadd_pd&expand=2542)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3082,7 +3082,7 @@ pub unsafe fn _mm256_mask_fmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m25
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_pd&expand=2544)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmadd_pd&expand=2544)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3094,7 +3094,7 @@ pub unsafe fn _mm256_maskz_fmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m2
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_pd&expand=2543)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmadd_pd&expand=2543)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3105,7 +3105,7 @@ pub unsafe fn _mm256_mask3_fmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mma
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_pd&expand=2538)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmadd_pd&expand=2538)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3116,7 +3116,7 @@ pub unsafe fn _mm_mask_fmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d)
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_pd&expand=2540)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmadd_pd&expand=2540)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -3128,7 +3128,7 @@ pub unsafe fn _mm_maskz_fmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_pd&expand=2539)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmadd_pd&expand=2539)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
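
The "intermediate result" wording in these comments matters: the multiply-add is fused, i.e. rounded once. A sketch that exposes the difference against an unfused multiply-then-add (same harness as above):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fmadd_pd_demo() {
    use std::arch::x86_64::*;
    let x = 1.0f64 + f64::EPSILON;
    let prod = x * x; // rounded to 53 bits, discarding the low term of x^2
    let a = _mm512_set1_pd(x);
    let c = _mm512_set1_pd(-prod);
    let r: [f64; 8] = core::mem::transmute(_mm512_fmadd_pd(a, a, c));
    assert!(r[0] != 0.0);          // fused: recovers the bits that x * x discarded
    assert_eq!(x * x - prod, 0.0); // unfused: the same expression rounds to zero
}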
@@ -3139,7 +3139,7 @@ pub unsafe fn _mm_mask3_fmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_ps&expand=2643)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_ps&expand=2643)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3151,7 +3151,7 @@ pub unsafe fn _mm512_fmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 {
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_ps&expand=2644)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_ps&expand=2644)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3162,7 +3162,7 @@ pub unsafe fn _mm512_mask_fmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m512
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_ps&expand=2646)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_ps&expand=2646)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3174,7 +3174,7 @@ pub unsafe fn _mm512_maskz_fmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m51
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_ps&expand=2645)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_ps&expand=2645)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3185,7 +3185,7 @@ pub unsafe fn _mm512_mask3_fmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask1
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsub_ps&expand=2640)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsub_ps&expand=2640)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3196,7 +3196,7 @@ pub unsafe fn _mm256_mask_fmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256)
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsub_ps&expand=2642)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsub_ps&expand=2642)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3208,7 +3208,7 @@ pub unsafe fn _mm256_maskz_fmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m256
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsub_ps&expand=2641)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsub_ps&expand=2641)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3219,7 +3219,7 @@ pub unsafe fn _mm256_mask3_fmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask8
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_ps&expand=2636)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsub_ps&expand=2636)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3230,7 +3230,7 @@ pub unsafe fn _mm_mask_fmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) ->
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_ps&expand=2638)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsub_ps&expand=2638)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -3242,7 +3242,7 @@ pub unsafe fn _mm_maskz_fmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128) -
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_ps&expand=2637)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsub_ps&expand=2637)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
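
fmsub is simply a*b - c, which is consistent with the assert_instr comments above noting that clang ends up emitting vfmadd for the 512-bit forms: the subtraction can be folded into a fused multiply-add with a negated addend. A sketch under the same harness:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fmsub_ps_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_ps(2.0);
    let b = _mm512_set1_ps(3.0);
    let c = _mm512_set1_ps(10.0);
    let r: [f32; 16] = core::mem::transmute(_mm512_fmsub_ps(a, b, c));
    assert_eq!(r[0], -4.0); // 2.0 * 3.0 - 10.0
}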
@@ -3253,7 +3253,7 @@ pub unsafe fn _mm_mask3_fmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8) -
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_pd&expand=2631)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_pd&expand=2631)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3265,7 +3265,7 @@ pub unsafe fn _mm512_fmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_pd&expand=2632)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_pd&expand=2632)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3276,7 +3276,7 @@ pub unsafe fn _mm512_mask_fmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m51
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_pd&expand=2634)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_pd&expand=2634)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3288,7 +3288,7 @@ pub unsafe fn _mm512_maskz_fmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m5
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_pd&expand=2633)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_pd&expand=2633)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3299,7 +3299,7 @@ pub unsafe fn _mm512_mask3_fmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mma
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsub_pd&expand=2628)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsub_pd&expand=2628)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3310,7 +3310,7 @@ pub unsafe fn _mm256_mask_fmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m25
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsub_pd&expand=2630)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsub_pd&expand=2630)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3322,7 +3322,7 @@ pub unsafe fn _mm256_maskz_fmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m2
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsub_pd&expand=2629)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsub_pd&expand=2629)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3333,7 +3333,7 @@ pub unsafe fn _mm256_mask3_fmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mma
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_pd&expand=2624)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsub_pd&expand=2624)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3344,7 +3344,7 @@ pub unsafe fn _mm_mask_fmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d)
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_pd&expand=2626)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsub_pd&expand=2626)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
@@ -3356,7 +3356,7 @@ pub unsafe fn _mm_maskz_fmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_pd&expand=2625)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsub_pd&expand=2625)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsub))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang fmadd, gcc fmsub
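
The mask3 naming convention applies to fmsub as well: inactive lanes fall through from c, the third operand. A 256-bit sketch (assumes AVX512F plus AVX512VL, same harness as above):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn fmsub_pd_demo() {
    use std::arch::x86_64::*;
    let a = _mm256_set1_pd(1.5);
    let b = _mm256_set1_pd(4.0);
    let c = _mm256_set1_pd(1.0);
    let r: [f64; 4] = core::mem::transmute(_mm256_mask3_fmsub_pd(a, b, c, 0b0101));
    assert_eq!(r, [5.0, 1.0, 5.0, 1.0]); // active lanes: 1.5 * 4.0 - 1.0; inactive: c
}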
@@ -3367,7 +3367,7 @@ pub unsafe fn _mm_mask3_fmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_ps&expand=2611)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_ps&expand=2611)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3382,7 +3382,7 @@ pub unsafe fn _mm512_fmaddsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 {
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_ps&expand=2612)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_ps&expand=2612)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3393,7 +3393,7 @@ pub unsafe fn _mm512_mask_fmaddsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_ps&expand=2614)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_ps&expand=2614)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3405,7 +3405,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_ps(k: __mmask16, a: __m512, b: __m512, c: __
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_ps&expand=2613)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_ps&expand=2613)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3416,7 +3416,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_ps(a: __m512, b: __m512, c: __m512, k: __mma
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmaddsub_ps&expand=2608)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmaddsub_ps&expand=2608)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3427,7 +3427,7 @@ pub unsafe fn _mm256_mask_fmaddsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m2
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmaddsub_ps&expand=2610)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmaddsub_ps&expand=2610)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3439,7 +3439,7 @@ pub unsafe fn _mm256_maskz_fmaddsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmaddsub_ps&expand=2609)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmaddsub_ps&expand=2609)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3450,7 +3450,7 @@ pub unsafe fn _mm256_mask3_fmaddsub_ps(a: __m256, b: __m256, c: __m256, k: __mma
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmaddsub_ps&expand=2604)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmaddsub_ps&expand=2604)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -3473,7 +3473,7 @@ pub unsafe fn _mm_maskz_fmaddsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmaddsub_ps&expand=2605)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmaddsub_ps&expand=2605)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
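
"Alternatively add and subtract" means even-indexed lanes compute a*b - c and odd-indexed lanes compute a*b + c, the interleaving used for complex-number arithmetic. A sketch under the usual harness:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fmaddsub_ps_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_ps(2.0);
    let b = _mm512_set1_ps(3.0);
    let c = _mm512_set1_ps(1.0);
    let r: [f32; 16] = core::mem::transmute(_mm512_fmaddsub_ps(a, b, c));
    assert_eq!(r[0], 5.0); // even lane: 2.0 * 3.0 - 1.0
    assert_eq!(r[1], 7.0); // odd lane:  2.0 * 3.0 + 1.0
}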
@@ -3484,7 +3484,7 @@ pub unsafe fn _mm_mask3_fmaddsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_pd&expand=2599)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_pd&expand=2599)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3499,7 +3499,7 @@ pub unsafe fn _mm512_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_pd&expand=2600)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_pd&expand=2600)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3510,7 +3510,7 @@ pub unsafe fn _mm512_mask_fmaddsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_pd&expand=2602)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_pd&expand=2602)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3522,7 +3522,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: _
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_ps&expand=2613)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_pd&expand=2601)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3533,7 +3533,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmaddsub_pd&expand=2596)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmaddsub_pd&expand=2596)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3544,7 +3544,7 @@ pub unsafe fn _mm256_mask_fmaddsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmaddsub_pd&expand=2598)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmaddsub_pd&expand=2598)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3556,7 +3556,7 @@ pub unsafe fn _mm256_maskz_fmaddsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: _
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmaddsub_pd&expand=2597)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmaddsub_pd&expand=2597)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3567,7 +3567,7 @@ pub unsafe fn _mm256_mask3_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmaddsub_pd&expand=2592)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmaddsub_pd&expand=2592)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3578,7 +3578,7 @@ pub unsafe fn _mm_mask_fmaddsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m12
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmaddsub_pd&expand=2594)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmaddsub_pd&expand=2594)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3590,7 +3590,7 @@ pub unsafe fn _mm_maskz_fmaddsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m1
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively add and subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmaddsub_pd&expand=2593)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmaddsub_pd&expand=2593)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -3601,7 +3601,7 @@ pub unsafe fn _mm_mask3_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mma
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_ps&expand=2691)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_ps&expand=2691)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3618,7 +3618,7 @@ pub unsafe fn _mm512_fmsubadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 {
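fmsubadd mirrors fmaddsub: the even/odd lane roles swap. A minimal scalar sketch of _mm512_fmsubadd_ps (illustrative name, with lane parity taken from the pseudocode in the linked guide):

    // Illustrative scalar model of _mm512_fmsubadd_ps: even lanes add, odd lanes subtract.
    fn fmsubadd_ps_ref(a: [f32; 16], b: [f32; 16], c: [f32; 16]) -> [f32; 16] {
        let mut dst = [0.0f32; 16];
        for i in 0..16 {
            dst[i] = if i % 2 == 0 { a[i] * b[i] + c[i] } else { a[i] * b[i] - c[i] };
        }
        dst
    }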
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_ps&expand=2692)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_ps&expand=2692)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3629,7 +3629,7 @@ pub unsafe fn _mm512_mask_fmsubadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_ps&expand=2694)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_ps&expand=2694)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3641,7 +3641,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_ps(k: __mmask16, a: __m512, b: __m512, c: __
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_ps&expand=2693)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_ps&expand=2693)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3652,7 +3652,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_ps(a: __m512, b: __m512, c: __m512, k: __mma
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsubadd_ps&expand=2688)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsubadd_ps&expand=2688)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3663,7 +3663,7 @@ pub unsafe fn _mm256_mask_fmsubadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m2
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsubadd_ps&expand=2690)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsubadd_ps&expand=2690)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3675,7 +3675,7 @@ pub unsafe fn _mm256_maskz_fmsubadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsubadd_ps&expand=2689)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsubadd_ps&expand=2689)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3686,7 +3686,7 @@ pub unsafe fn _mm256_mask3_fmsubadd_ps(a: __m256, b: __m256, c: __m256, k: __mma
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsubadd_ps&expand=2684)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsubadd_ps&expand=2684)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3697,7 +3697,7 @@ pub unsafe fn _mm_mask_fmsubadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128)
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsubadd_ps&expand=2686)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsubadd_ps&expand=2686)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3709,7 +3709,7 @@ pub unsafe fn _mm_maskz_fmsubadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsubadd_ps&expand=2685)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsubadd_ps&expand=2685)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -3720,7 +3720,7 @@ pub unsafe fn _mm_mask3_fmsubadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_pd&expand=2679)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_pd&expand=2679)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3737,7 +3737,7 @@ pub unsafe fn _mm512_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_pd&expand=2680)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_pd&expand=2680)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3748,7 +3748,7 @@ pub unsafe fn _mm512_mask_fmsubadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_pd&expand=2682)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_pd&expand=2682)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3760,7 +3760,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: _
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_pd&expand=2681)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_pd&expand=2681)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3771,7 +3771,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsubadd_pd&expand=2676)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fmsubadd_pd&expand=2676)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3782,7 +3782,7 @@ pub unsafe fn _mm256_mask_fmsubadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsubadd_pd&expand=2678)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fmsubadd_pd&expand=2678)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3794,7 +3794,7 @@ pub unsafe fn _mm256_maskz_fmsubadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: _
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsubadd_pd&expand=2677)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fmsubadd_pd&expand=2677)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3805,7 +3805,7 @@ pub unsafe fn _mm256_mask3_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsubadd_pd&expand=2672)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fmsubadd_pd&expand=2672)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3816,7 +3816,7 @@ pub unsafe fn _mm_mask_fmsubadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m12
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsubadd_pd&expand=2674)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fmsubadd_pd&expand=2674)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3828,7 +3828,7 @@ pub unsafe fn _mm_maskz_fmsubadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m1
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, alternatively subtract and add packed elements in c from/to the intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsubadd_pd&expand=2673)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fmsubadd_pd&expand=2673)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfmsubadd))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -3839,7 +3839,7 @@ pub unsafe fn _mm_mask3_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mma
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_ps&expand=2723)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_ps&expand=2723)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3851,7 +3851,7 @@ pub unsafe fn _mm512_fnmadd_ps(a: __m512, b: __m512, c: __m512) -> __m512 {
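Per lane, fnmadd computes -(a*b) + c with a single rounding. A minimal sketch of the formula, assuming std's fused multiply-add to preserve the single-rounding behavior (the function name is invented for illustration):

    // Illustrative per-lane fnmadd: negate one factor, then use the fused
    // multiply-add from std so the product and add round only once.
    fn fnmadd_lane(a: f32, b: f32, c: f32) -> f32 {
        (-a).mul_add(b, c) // -(a*b) + c with one rounding
    }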
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_ps&expand=2724)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_ps&expand=2724)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3862,7 +3862,7 @@ pub unsafe fn _mm512_mask_fnmadd_ps(a: __m512, k: __mmask16, b: __m512, c: __m51
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_ps&expand=2726)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_ps&expand=2726)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3874,7 +3874,7 @@ pub unsafe fn _mm512_maskz_fnmadd_ps(k: __mmask16, a: __m512, b: __m512, c: __m5
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_ps&expand=2725)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_ps&expand=2725)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3885,7 +3885,7 @@ pub unsafe fn _mm512_mask3_fnmadd_ps(a: __m512, b: __m512, c: __m512, k: __mmask
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmadd_ps&expand=2720)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmadd_ps&expand=2720)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3896,7 +3896,7 @@ pub unsafe fn _mm256_mask_fnmadd_ps(a: __m256, k: __mmask8, b: __m256, c: __m256
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmadd_ps&expand=2722)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmadd_ps&expand=2722)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3908,7 +3908,7 @@ pub unsafe fn _mm256_maskz_fnmadd_ps(k: __mmask8, a: __m256, b: __m256, c: __m25
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmadd_ps&expand=2721)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmadd_ps&expand=2721)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3919,7 +3919,7 @@ pub unsafe fn _mm256_mask3_fnmadd_ps(a: __m256, b: __m256, c: __m256, k: __mmask
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_ps&expand=2716)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmadd_ps&expand=2716)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3930,7 +3930,7 @@ pub unsafe fn _mm_mask_fnmadd_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_ps&expand=2718)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmadd_ps&expand=2718)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3942,7 +3942,7 @@ pub unsafe fn _mm_maskz_fnmadd_ps(k: __mmask8, a: __m128, b: __m128, c: __m128)
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_ps&expand=2717)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmadd_ps&expand=2717)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -3953,7 +3953,7 @@ pub unsafe fn _mm_mask3_fnmadd_ps(a: __m128, b: __m128, c: __m128, k: __mmask8)
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_pd&expand=2711)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_pd&expand=2711)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -3965,7 +3965,7 @@ pub unsafe fn _mm512_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_pd&expand=2712)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_pd&expand=2712)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -3976,7 +3976,7 @@ pub unsafe fn _mm512_mask_fnmadd_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m5
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_pd&expand=2714)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_pd&expand=2714)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -3988,7 +3988,7 @@ pub unsafe fn _mm512_maskz_fnmadd_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_pd&expand=2713)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_pd&expand=2713)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -3999,7 +3999,7 @@ pub unsafe fn _mm512_mask3_fnmadd_pd(a: __m512d, b: __m512d, c: __m512d, k: __mm
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmadd_pd&expand=2708)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmadd_pd&expand=2708)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -4010,7 +4010,7 @@ pub unsafe fn _mm256_mask_fnmadd_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m2
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmadd_pd&expand=2710)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmadd_pd&expand=2710)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -4022,7 +4022,7 @@ pub unsafe fn _mm256_maskz_fnmadd_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmadd_pd&expand=2709)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmadd_pd&expand=2709)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -4033,7 +4033,7 @@ pub unsafe fn _mm256_mask3_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d, k: __mm
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_pd&expand=2704)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmadd_pd&expand=2704)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -4044,7 +4044,7 @@ pub unsafe fn _mm_mask_fnmadd_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_pd&expand=2706)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmadd_pd&expand=2706)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -4056,7 +4056,7 @@ pub unsafe fn _mm_maskz_fnmadd_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, add the negated intermediate result to packed elements in c, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_pd&expand=2705)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmadd_pd&expand=2705)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmadd))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -4067,7 +4067,7 @@ pub unsafe fn _mm_mask3_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_ps&expand=2771)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_ps&expand=2771)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4080,7 +4080,7 @@ pub unsafe fn _mm512_fnmsub_ps(a: __m512, b: __m512, c: __m512) -> __m512 {
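fnmsub is the remaining sign combination: subtract c from the negated product, i.e. -(a*b) - c per lane. The same hedged sketch as for fnmadd, again via std's fused multiply-add:

    // Illustrative per-lane fnmsub: -(a*b) - c, fused into one rounding step.
    fn fnmsub_lane(a: f32, b: f32, c: f32) -> f32 {
        (-a).mul_add(b, -c) // -(a*b) - c with one rounding
    }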
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_ps&expand=2772)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_ps&expand=2772)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4091,7 +4091,7 @@ pub unsafe fn _mm512_mask_fnmsub_ps(a: __m512, k: __mmask16, b: __m512, c: __m51
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_ps&expand=2774)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_ps&expand=2774)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4103,7 +4103,7 @@ pub unsafe fn _mm512_maskz_fnmsub_ps(k: __mmask16, a: __m512, b: __m512, c: __m5
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_ps&expand=2773)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_ps&expand=2773)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4114,7 +4114,7 @@ pub unsafe fn _mm512_mask3_fnmsub_ps(a: __m512, b: __m512, c: __m512, k: __mmask
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmsub_ps&expand=2768)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmsub_ps&expand=2768)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4125,7 +4125,7 @@ pub unsafe fn _mm256_mask_fnmsub_ps(a: __m256, k: __mmask8, b: __m256, c: __m256
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmsub_ps&expand=2770)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmsub_ps&expand=2770)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4137,7 +4137,7 @@ pub unsafe fn _mm256_maskz_fnmsub_ps(k: __mmask8, a: __m256, b: __m256, c: __m25
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmsub_ps&expand=2769)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmsub_ps&expand=2769)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4148,7 +4148,7 @@ pub unsafe fn _mm256_mask3_fnmsub_ps(a: __m256, b: __m256, c: __m256, k: __mmask
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_ps&expand=2764)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmsub_ps&expand=2764)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4159,7 +4159,7 @@ pub unsafe fn _mm_mask_fnmsub_ps(a: __m128, k: __mmask8, b: __m128, c: __m128) -
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_ps&expand=2766)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmsub_ps&expand=2766)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4171,7 +4171,7 @@ pub unsafe fn _mm_maskz_fnmsub_ps(k: __mmask8, a: __m128, b: __m128, c: __m128)
/// Multiply packed single-precision (32-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_ps&expand=2765)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmsub_ps&expand=2765)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -4182,7 +4182,7 @@ pub unsafe fn _mm_mask3_fnmsub_ps(a: __m128, b: __m128, c: __m128, k: __mmask8)
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_pd&expand=2759)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_pd&expand=2759)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4195,7 +4195,7 @@ pub unsafe fn _mm512_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_pd&expand=2760)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_pd&expand=2760)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4206,7 +4206,7 @@ pub unsafe fn _mm512_mask_fnmsub_pd(a: __m512d, k: __mmask8, b: __m512d, c: __m5
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_pd&expand=2762)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_pd&expand=2762)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4218,7 +4218,7 @@ pub unsafe fn _mm512_maskz_fnmsub_pd(k: __mmask8, a: __m512d, b: __m512d, c: __m
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_pd&expand=2761)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_pd&expand=2761)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4229,7 +4229,7 @@ pub unsafe fn _mm512_mask3_fnmsub_pd(a: __m512d, b: __m512d, c: __m512d, k: __mm
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmsub_pd&expand=2756)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fnmsub_pd&expand=2756)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4240,7 +4240,7 @@ pub unsafe fn _mm256_mask_fnmsub_pd(a: __m256d, k: __mmask8, b: __m256d, c: __m2
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmsub_pd&expand=2758)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fnmsub_pd&expand=2758)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4252,7 +4252,7 @@ pub unsafe fn _mm256_maskz_fnmsub_pd(k: __mmask8, a: __m256d, b: __m256d, c: __m
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmsub_pd&expand=2757)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask3_fnmsub_pd&expand=2757)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4263,7 +4263,7 @@ pub unsafe fn _mm256_mask3_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d, k: __mm
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_pd&expand=2752)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fnmsub_pd&expand=2752)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4274,7 +4274,7 @@ pub unsafe fn _mm_mask_fnmsub_pd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_pd&expand=2754)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fnmsub_pd&expand=2754)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4286,7 +4286,7 @@ pub unsafe fn _mm_maskz_fnmsub_pd(k: __mmask8, a: __m128d, b: __m128d, c: __m128
/// Multiply packed double-precision (64-bit) floating-point elements in a and b, subtract packed elements in c from the negated intermediate result, and store the results in dst using writemask k (elements are copied from c when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_pd&expand=2753)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask3_fnmsub_pd&expand=2753)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfnmsub))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -4297,7 +4297,7 @@ pub unsafe fn _mm_mask3_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d, k: __mmask
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rcp14_ps&expand=4502)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rcp14_ps&expand=4502)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4311,7 +4311,7 @@ pub unsafe fn _mm512_rcp14_ps(a: __m512) -> __m512 {
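The 2^-14 figure is a relative-error guarantee, so it is straightforward to spot-check an approximation against the exact reciprocal. A hypothetical checker (on real hardware the approximation would come from _mm512_rcp14_ps; the function name here is invented):

    // Illustrative check of the documented relative-error bound for rcp14.
    fn within_rcp14_bound(x: f32, approx: f32) -> bool {
        let exact = 1.0 / x;
        ((approx - exact) / exact).abs() < (2.0f32).powi(-14)
    }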
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rcp14_ps&expand=4500)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rcp14_ps&expand=4500)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4321,7 +4321,7 @@ pub unsafe fn _mm512_mask_rcp14_ps(src: __m512, k: __mmask16, a: __m512) -> __m5
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rcp14_ps&expand=4501)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rcp14_ps&expand=4501)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4331,7 +4331,7 @@ pub unsafe fn _mm512_maskz_rcp14_ps(k: __mmask16, a: __m512) -> __m512 {
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp14_ps&expand=4499)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rcp14_ps&expand=4499)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4345,7 +4345,7 @@ pub unsafe fn _mm256_rcp14_ps(a: __m256) -> __m256 {
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rcp14_ps&expand=4497)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rcp14_ps&expand=4497)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4355,7 +4355,7 @@ pub unsafe fn _mm256_mask_rcp14_ps(src: __m256, k: __mmask8, a: __m256) -> __m25
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rcp14_ps&expand=4498)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rcp14_ps&expand=4498)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4365,7 +4365,7 @@ pub unsafe fn _mm256_maskz_rcp14_ps(k: __mmask8, a: __m256) -> __m256 {
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp14_ps&expand=4496)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp14_ps&expand=4496)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4379,7 +4379,7 @@ pub unsafe fn _mm_rcp14_ps(a: __m128) -> __m128 {
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp14_ps&expand=4494)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rcp14_ps&expand=4494)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4389,7 +4389,7 @@ pub unsafe fn _mm_mask_rcp14_ps(src: __m128, k: __mmask8, a: __m128) -> __m128 {
/// Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp14_ps&expand=4495)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rcp14_ps&expand=4495)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14ps))]
@@ -4399,7 +4399,7 @@ pub unsafe fn _mm_maskz_rcp14_ps(k: __mmask8, a: __m128) -> __m128 {
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rcp14_pd&expand=4493)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rcp14_pd&expand=4493)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
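// Illustrative follow-up (not part of this diff): `_mm512_rcp14_pd` is only
// accurate to ~14 bits, so a common pattern is one Newton-Raphson step,
// r' = r * (2 - a*r), which roughly doubles the number of correct bits.
#[cfg(target_arch = "x86_64")]
mod recip_refine {
    use std::arch::x86_64::*;

    /// # Safety
    /// Caller must ensure AVX512F is available.
    #[target_feature(enable = "avx512f")]
    pub unsafe fn refined_recip_pd(a: __m512d) -> __m512d {
        let r = _mm512_rcp14_pd(a); // ~14-bit estimate of 1/a
        let e = _mm512_sub_pd(_mm512_set1_pd(2.0), _mm512_mul_pd(a, r));
        _mm512_mul_pd(r, e) // one Newton-Raphson refinement step
    }
}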
@@ -4413,7 +4413,7 @@ pub unsafe fn _mm512_rcp14_pd(a: __m512d) -> __m512d {
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rcp14_pd&expand=4491)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rcp14_pd&expand=4491)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4423,7 +4423,7 @@ pub unsafe fn _mm512_mask_rcp14_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rcp14_pd&expand=4492)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rcp14_pd&expand=4492)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4433,7 +4433,7 @@ pub unsafe fn _mm512_maskz_rcp14_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp14_pd&expand=4490)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rcp14_pd&expand=4490)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4447,7 +4447,7 @@ pub unsafe fn _mm256_rcp14_pd(a: __m256d) -> __m256d {
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rcp14_pd&expand=4488)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rcp14_pd&expand=4488)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4457,7 +4457,7 @@ pub unsafe fn _mm256_mask_rcp14_pd(src: __m256d, k: __mmask8, a: __m256d) -> __m
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rcp14_pd&expand=4489)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rcp14_pd&expand=4489)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4467,7 +4467,7 @@ pub unsafe fn _mm256_maskz_rcp14_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp14_pd&expand=4487)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp14_pd&expand=4487)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4481,7 +4481,7 @@ pub unsafe fn _mm_rcp14_pd(a: __m128d) -> __m128d {
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp14_pd&expand=4485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rcp14_pd&expand=4485)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4491,7 +4491,7 @@ pub unsafe fn _mm_mask_rcp14_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m128
/// Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp14_pd&expand=4486)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rcp14_pd&expand=4486)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrcp14pd))]
@@ -4501,7 +4501,7 @@ pub unsafe fn _mm_maskz_rcp14_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rsqrt14_ps&expand=4819)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rsqrt14_ps&expand=4819)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
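// Illustrative sketch (not part of this diff): a ~14-bit square root via
// x * (1/sqrt(x)), which can be cheaper than a full-precision vsqrtps when
// the 2^-14 error bound above is acceptable. Caveat: x = 0 yields NaN here
// (0 * inf), so callers must mask zeros out first.
#[cfg(target_arch = "x86_64")]
mod approx_sqrt {
    use std::arch::x86_64::*;

    /// # Safety
    /// Caller must ensure AVX512F is available.
    #[target_feature(enable = "avx512f")]
    pub unsafe fn sqrt14_ps(x: __m512) -> __m512 {
        _mm512_mul_ps(x, _mm512_rsqrt14_ps(x))
    }
}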
@@ -4515,7 +4515,7 @@ pub unsafe fn _mm512_rsqrt14_ps(a: __m512) -> __m512 {
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rsqrt14_ps&expand=4817)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rsqrt14_ps&expand=4817)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
@@ -4525,7 +4525,7 @@ pub unsafe fn _mm512_mask_rsqrt14_ps(src: __m512, k: __mmask16, a: __m512) -> __
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rsqrt14_ps&expand=4818)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rsqrt14_ps&expand=4818)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
@@ -4539,7 +4539,7 @@ pub unsafe fn _mm512_maskz_rsqrt14_ps(k: __mmask16, a: __m512) -> __m512 {
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt14_ps&expand=4815)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rsqrt14_ps&expand=4815)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
@@ -4549,7 +4549,7 @@ pub unsafe fn _mm256_mask_rsqrt14_ps(src: __m256, k: __mmask8, a: __m256) -> __m
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rsqrt14_ps&expand=4816)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rsqrt14_ps&expand=4816)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
@@ -4563,7 +4563,7 @@ pub unsafe fn _mm256_maskz_rsqrt14_ps(k: __mmask8, a: __m256) -> __m256 {
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt14_ps&expand=4813)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rsqrt14_ps&expand=4813)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
@@ -4573,7 +4573,7 @@ pub unsafe fn _mm_mask_rsqrt14_ps(src: __m128, k: __mmask8, a: __m128) -> __m128
/// Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt14_ps&expand=4814)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rsqrt14_ps&expand=4814)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14ps))]
@@ -4583,7 +4583,7 @@ pub unsafe fn _mm_maskz_rsqrt14_ps(k: __mmask8, a: __m128) -> __m128 {
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rsqrt14_pd&expand=4812)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rsqrt14_pd&expand=4812)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
@@ -4597,7 +4597,7 @@ pub unsafe fn _mm512_rsqrt14_pd(a: __m512d) -> __m512d {
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rsqrt14_pd&expand=4810)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rsqrt14_pd&expand=4810)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
@@ -4607,7 +4607,7 @@ pub unsafe fn _mm512_mask_rsqrt14_pd(src: __m512d, k: __mmask8, a: __m512d) -> _
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rsqrt14_pd&expand=4811)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rsqrt14_pd&expand=4811)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
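// Illustrative contrast (not part of this diff) between the writemask and
// zeromask forms documented above: with k = 0b0101, lanes 0 and 2 receive
// the computed result in both, while the remaining lanes come from `src` in
// the `mask_` form but become 0.0 in the `maskz_` form.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn mask_vs_maskz_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_pd(4.0); // rsqrt14(4.0) ≈ 0.5
    let src = _mm512_set1_pd(-1.0);
    let k: __mmask8 = 0b0000_0101;
    let merged = _mm512_mask_rsqrt14_pd(src, k, a); // [0.5, -1.0, 0.5, -1.0, ...]
    let zeroed = _mm512_maskz_rsqrt14_pd(k, a);     // [0.5,  0.0, 0.5,  0.0, ...]
    let _ = (merged, zeroed);
}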
@@ -4617,7 +4617,7 @@ pub unsafe fn _mm512_maskz_rsqrt14_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt14_pd&expand=4808)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rsqrt14_pd&expand=4808)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
@@ -4627,7 +4627,7 @@ pub unsafe fn _mm256_mask_rsqrt14_pd(src: __m256d, k: __mmask8, a: __m256d) -> _
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rsqrt14_pd&expand=4809)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rsqrt14_pd&expand=4809)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
@@ -4641,7 +4641,7 @@ pub unsafe fn _mm256_maskz_rsqrt14_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt14_pd&expand=4806)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rsqrt14_pd&expand=4806)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
@@ -4651,7 +4651,7 @@ pub unsafe fn _mm_mask_rsqrt14_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m1
/// Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt14_pd&expand=4807)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rsqrt14_pd&expand=4807)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrsqrt14pd))]
@@ -4661,7 +4661,7 @@ pub unsafe fn _mm_maskz_rsqrt14_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_ps&expand=2844)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_ps&expand=2844)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4676,7 +4676,7 @@ pub unsafe fn _mm512_getexp_ps(a: __m512) -> __m512 {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_ps&expand=2845)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_ps&expand=2845)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4691,7 +4691,7 @@ pub unsafe fn _mm512_mask_getexp_ps(src: __m512, k: __mmask16, a: __m512) -> __m
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_ps&expand=2846)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_ps&expand=2846)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4706,7 +4706,7 @@ pub unsafe fn _mm512_maskz_getexp_ps(k: __mmask16, a: __m512) -> __m512 {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getexp_ps&expand=2841)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getexp_ps&expand=2841)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4720,7 +4720,7 @@ pub unsafe fn _mm256_getexp_ps(a: __m256) -> __m256 {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getexp_ps&expand=2842)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getexp_ps&expand=2842)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4730,7 +4730,7 @@ pub unsafe fn _mm256_mask_getexp_ps(src: __m256, k: __mmask8, a: __m256) -> __m2
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_ps&expand=2843)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getexp_ps&expand=2843)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4744,7 +4744,7 @@ pub unsafe fn _mm256_maskz_getexp_ps(k: __mmask8, a: __m256) -> __m256 {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_ps&expand=2838)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getexp_ps&expand=2838)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpps))]
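// Worked values (illustrative, not part of this diff) for the floor(log2(x))
// behaviour described above: the result is the unbiased exponent, returned
// as a float, e.g. getexp(6.0) = 2.0 because 6.0 = 1.5 * 2^2.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn getexp_demo() {
    use std::arch::x86_64::*;
    let a = _mm_setr_ps(1.0, 8.0, 0.5, 6.0);
    let e = _mm_getexp_ps(a);
    let mut out = [0.0f32; 4];
    _mm_storeu_ps(out.as_mut_ptr(), e);
    assert_eq!(out, [0.0, 3.0, -1.0, 2.0]);
}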
@@ -4758,7 +4758,7 @@ pub unsafe fn _mm_getexp_ps(a: __m128) -> __m128 {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_ps&expand=2839)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getexp_ps&expand=2839)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4768,7 +4768,7 @@ pub unsafe fn _mm_mask_getexp_ps(src: __m128, k: __mmask8, a: __m128) -> __m128
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_ps&expand=2840)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getexp_ps&expand=2840)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexpps))]
@@ -4778,7 +4778,7 @@ pub unsafe fn _mm_maskz_getexp_ps(k: __mmask8, a: __m128) -> __m128 {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_pd&expand=2835)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_pd&expand=2835)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4793,7 +4793,7 @@ pub unsafe fn _mm512_getexp_pd(a: __m512d) -> __m512d {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_pd&expand=2836)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_pd&expand=2836)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4808,7 +4808,7 @@ pub unsafe fn _mm512_mask_getexp_pd(src: __m512d, k: __mmask8, a: __m512d) -> __
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_pd&expand=2837)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_pd&expand=2837)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4823,7 +4823,7 @@ pub unsafe fn _mm512_maskz_getexp_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getexp_pd&expand=2832)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getexp_pd&expand=2832)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4837,7 +4837,7 @@ pub unsafe fn _mm256_getexp_pd(a: __m256d) -> __m256d {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getexp_pd&expand=2833)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getexp_pd&expand=2833)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4847,7 +4847,7 @@ pub unsafe fn _mm256_mask_getexp_pd(src: __m256d, k: __mmask8, a: __m256d) -> __
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_pd&expand=2834)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getexp_pd&expand=2834)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4861,7 +4861,7 @@ pub unsafe fn _mm256_maskz_getexp_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_pd&expand=2829)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getexp_pd&expand=2829)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4875,7 +4875,7 @@ pub unsafe fn _mm_getexp_pd(a: __m128d) -> __m128d {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_pd&expand=2830)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getexp_pd&expand=2830)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4885,7 +4885,7 @@ pub unsafe fn _mm_mask_getexp_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m12
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_pd&expand=2831)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getexp_pd&expand=2831)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetexppd))]
@@ -4901,13 +4901,13 @@ pub unsafe fn _mm_maskz_getexp_pd(k: __mmask8, a: __m128d) -> __m128d {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_ps&expand=4784)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_ps&expand=4784)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_roundscale_ps<const IMM8: i32>(a: __m512) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
let r = vrndscaleps(a, IMM8, zero, 0b11111111_11111111, _MM_FROUND_CUR_DIRECTION);
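// Illustrative sketch (not part of this diff, and the IMM8 layout is stated
// here as an assumption from the instruction reference, not from the doc
// text above): bits [7:4] of IMM8 hold M, the number of fraction bits to
// keep, and the low bits hold one of the rounding modes listed above, so
// dst = 2^-M * round(a * 2^M).
#[cfg(target_arch = "x86_64")]
mod roundscale_demo {
    use std::arch::x86_64::*;

    /// # Safety
    /// Caller must ensure AVX512F is available.
    #[target_feature(enable = "avx512f")]
    pub unsafe fn round_to_quarters(a: __m512) -> __m512 {
        // M = 2 (keep two fraction bits), round to nearest:
        // IMM8 = (2 << 4) | 0 = 0x20; e.g. 1.2345 -> 1.25 in every lane.
        _mm512_roundscale_ps::<0x20>(a)
    }
}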
@@ -4922,7 +4922,7 @@ pub unsafe fn _mm512_roundscale_ps<const IMM8: i32>(a: __m512) -> __m512 {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_ps&expand=4782)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_ps&expand=4782)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
@@ -4932,7 +4932,7 @@ pub unsafe fn _mm512_mask_roundscale_ps<const IMM8: i32>(
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x16();
let src = src.as_f32x16();
let r = vrndscaleps(a, IMM8, src, k, _MM_FROUND_CUR_DIRECTION);
@@ -4947,13 +4947,13 @@ pub unsafe fn _mm512_mask_roundscale_ps<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_ps&expand=4783)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_ps&expand=4783)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_roundscale_ps<const IMM8: i32>(k: __mmask16, a: __m512) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
let r = vrndscaleps(a, IMM8, zero, k, _MM_FROUND_CUR_DIRECTION);
@@ -4968,13 +4968,13 @@ pub unsafe fn _mm512_maskz_roundscale_ps<const IMM8: i32>(k: __mmask16, a: __m51
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_roundscale_ps&expand=4781)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_roundscale_ps&expand=4781)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_roundscale_ps<const IMM8: i32>(a: __m256) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let zero = _mm256_setzero_ps().as_f32x8();
let r = vrndscaleps256(a, IMM8, zero, 0b11111111);
@@ -4989,7 +4989,7 @@ pub unsafe fn _mm256_roundscale_ps<const IMM8: i32>(a: __m256) -> __m256 {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_ps&expand=4779)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_roundscale_ps&expand=4779)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
@@ -4999,7 +4999,7 @@ pub unsafe fn _mm256_mask_roundscale_ps<const IMM8: i32>(
k: __mmask8,
a: __m256,
) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let src = src.as_f32x8();
let r = vrndscaleps256(a, IMM8, src, k);
@@ -5014,13 +5014,13 @@ pub unsafe fn _mm256_mask_roundscale_ps<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_ps&expand=4780)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_roundscale_ps&expand=4780)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_roundscale_ps<const IMM8: i32>(k: __mmask8, a: __m256) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let zero = _mm256_setzero_ps().as_f32x8();
let r = vrndscaleps256(a, IMM8, zero, k);
@@ -5035,13 +5035,13 @@ pub unsafe fn _mm256_maskz_roundscale_ps<const IMM8: i32>(k: __mmask8, a: __m256
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_ps&expand=4778)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_roundscale_ps&expand=4778)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 250))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_roundscale_ps<const IMM8: i32>(a: __m128) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
let r = vrndscaleps128(a, IMM8, zero, 0b00001111);
@@ -5056,7 +5056,7 @@ pub unsafe fn _mm_roundscale_ps<const IMM8: i32>(a: __m128) -> __m128 {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_ps&expand=4776)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_roundscale_ps&expand=4776)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
@@ -5066,7 +5066,7 @@ pub unsafe fn _mm_mask_roundscale_ps<const IMM8: i32>(
k: __mmask8,
a: __m128,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let src = src.as_f32x4();
let r = vrndscaleps128(a, IMM8, src, k);
@@ -5081,13 +5081,13 @@ pub unsafe fn _mm_mask_roundscale_ps<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_ps&expand=4777)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_roundscale_ps&expand=4777)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_roundscale_ps<const IMM8: i32>(k: __mmask8, a: __m128) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
let r = vrndscaleps128(a, IMM8, zero, k);
@@ -5102,13 +5102,13 @@ pub unsafe fn _mm_maskz_roundscale_ps<const IMM8: i32>(k: __mmask8, a: __m128) -
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_pd&expand=4775)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_pd&expand=4775)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_roundscale_pd<const IMM8: i32>(a: __m512d) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
let r = vrndscalepd(a, IMM8, zero, 0b11111111, _MM_FROUND_CUR_DIRECTION);
@@ -5123,7 +5123,7 @@ pub unsafe fn _mm512_roundscale_pd<const IMM8: i32>(a: __m512d) -> __m512d {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_pd&expand=4773)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_pd&expand=4773)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
@@ -5133,7 +5133,7 @@ pub unsafe fn _mm512_mask_roundscale_pd<const IMM8: i32>(
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x8();
let src = src.as_f64x8();
let r = vrndscalepd(a, IMM8, src, k, _MM_FROUND_CUR_DIRECTION);
@@ -5148,13 +5148,13 @@ pub unsafe fn _mm512_mask_roundscale_pd<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_pd&expand=4774)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_pd&expand=4774)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_roundscale_pd<const IMM8: i32>(k: __mmask8, a: __m512d) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
let r = vrndscalepd(a, IMM8, zero, k, _MM_FROUND_CUR_DIRECTION);
@@ -5169,13 +5169,13 @@ pub unsafe fn _mm512_maskz_roundscale_pd<const IMM8: i32>(k: __mmask8, a: __m512
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_roundscale_pd&expand=4772)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_roundscale_pd&expand=4772)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_roundscale_pd<const IMM8: i32>(a: __m256d) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x4();
let zero = _mm256_setzero_pd().as_f64x4();
let r = vrndscalepd256(a, IMM8, zero, 0b00001111);
@@ -5190,7 +5190,7 @@ pub unsafe fn _mm256_roundscale_pd<const IMM8: i32>(a: __m256d) -> __m256d {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_pd&expand=4770)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_roundscale_pd&expand=4770)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
@@ -5200,7 +5200,7 @@ pub unsafe fn _mm256_mask_roundscale_pd<const IMM8: i32>(
k: __mmask8,
a: __m256d,
) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x4();
let src = src.as_f64x4();
let r = vrndscalepd256(a, IMM8, src, k);
@@ -5215,13 +5215,13 @@ pub unsafe fn _mm256_mask_roundscale_pd<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_pd&expand=4771)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_roundscale_pd&expand=4771)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_roundscale_pd<const IMM8: i32>(k: __mmask8, a: __m256d) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x4();
let zero = _mm256_setzero_pd().as_f64x4();
let r = vrndscalepd256(a, IMM8, zero, k);
@@ -5236,13 +5236,13 @@ pub unsafe fn _mm256_maskz_roundscale_pd<const IMM8: i32>(k: __mmask8, a: __m256
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_pd&expand=4769)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_roundscale_pd&expand=4769)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_roundscale_pd<const IMM8: i32>(a: __m128d) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
let r = vrndscalepd128(a, IMM8, zero, 0b00000011);
@@ -5257,7 +5257,7 @@ pub unsafe fn _mm_roundscale_pd<const IMM8: i32>(a: __m128d) -> __m128d {
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_pd&expand=4767)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_roundscale_pd&expand=4767)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
@@ -5267,7 +5267,7 @@ pub unsafe fn _mm_mask_roundscale_pd<const IMM8: i32>(
k: __mmask8,
a: __m128d,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let src = src.as_f64x2();
let r = vrndscalepd128(a, IMM8, src, k);
@@ -5282,13 +5282,13 @@ pub unsafe fn _mm_mask_roundscale_pd<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_pd&expand=4768)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_roundscale_pd&expand=4768)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_roundscale_pd<const IMM8: i32>(k: __mmask8, a: __m128d) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
let r = vrndscalepd128(a, IMM8, zero, k);
@@ -5297,7 +5297,7 @@ pub unsafe fn _mm_maskz_roundscale_pd<const IMM8: i32>(k: __mmask8, a: __m128d)
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_ps&expand=4883)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_ps&expand=4883)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefps))]
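// Worked example (illustrative, not part of this diff; the floor() detail is
// an assumption from the instruction reference): scalef computes
// a * 2^floor(b), so scaling 3.0 by b = 2.7 gives 3.0 * 2^2 = 12.0.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn scalef_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_ps(3.0);
    let b = _mm512_set1_ps(2.7);
    let r = _mm512_scalef_ps(a, b); // every lane = 12.0
    let _ = r;
}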
@@ -5313,7 +5313,7 @@ pub unsafe fn _mm512_scalef_ps(a: __m512, b: __m512) -> __m512 {
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_ps&expand=4881)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_ps&expand=4881)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5329,7 +5329,7 @@ pub unsafe fn _mm512_mask_scalef_ps(src: __m512, k: __mmask16, a: __m512, b: __m
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_ps&expand=4882)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_ps&expand=4882)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5345,7 +5345,7 @@ pub unsafe fn _mm512_maskz_scalef_ps(k: __mmask16, a: __m512, b: __m512) -> __m5
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_ps&expand=4880)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_scalef_ps&expand=4880)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5360,7 +5360,7 @@ pub unsafe fn _mm256_scalef_ps(a: __m256, b: __m256) -> __m256 {
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ps&expand=4878)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_scalef_ps&expand=4878)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5370,7 +5370,7 @@ pub unsafe fn _mm256_mask_scalef_ps(src: __m256, k: __mmask8, a: __m256, b: __m2
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_scalef_ps&expand=4879)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_scalef_ps&expand=4879)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5385,7 +5385,7 @@ pub unsafe fn _mm256_maskz_scalef_ps(k: __mmask8, a: __m256, b: __m256) -> __m25
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_ps&expand=4877)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_scalef_ps&expand=4877)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5400,7 +5400,7 @@ pub unsafe fn _mm_scalef_ps(a: __m128, b: __m128) -> __m128 {
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ps&expand=4875)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_scalef_ps&expand=4875)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5410,7 +5410,7 @@ pub unsafe fn _mm_mask_scalef_ps(src: __m128, k: __mmask8, a: __m128, b: __m128)
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_ps&expand=4876)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_scalef_ps&expand=4876)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefps))]
@@ -5425,7 +5425,7 @@ pub unsafe fn _mm_maskz_scalef_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_pd&expand=4874)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_pd&expand=4874)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5441,7 +5441,7 @@ pub unsafe fn _mm512_scalef_pd(a: __m512d, b: __m512d) -> __m512d {
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_pd&expand=4872)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_pd&expand=4872)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5457,7 +5457,7 @@ pub unsafe fn _mm512_mask_scalef_pd(src: __m512d, k: __mmask8, a: __m512d, b: __
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_pd&expand=4873)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_pd&expand=4873)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5473,7 +5473,7 @@ pub unsafe fn _mm512_maskz_scalef_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_pd&expand=4871)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_scalef_pd&expand=4871)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5488,7 +5488,7 @@ pub unsafe fn _mm256_scalef_pd(a: __m256d, b: __m256d) -> __m256d {
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_pd&expand=4869)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_scalef_pd&expand=4869)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5498,7 +5498,7 @@ pub unsafe fn _mm256_mask_scalef_pd(src: __m256d, k: __mmask8, a: __m256d, b: __
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_scalef_pd&expand=4870)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_scalef_pd&expand=4870)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5513,7 +5513,7 @@ pub unsafe fn _mm256_maskz_scalef_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_pd&expand=4868)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_scalef_pd&expand=4868)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5528,7 +5528,7 @@ pub unsafe fn _mm_scalef_pd(a: __m128d, b: __m128d) -> __m128d {
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_pd&expand=4866)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_scalef_pd&expand=4866)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefpd))]
@@ -5538,7 +5538,7 @@ pub unsafe fn _mm_mask_scalef_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m12
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_pd&expand=4867)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_scalef_pd&expand=4867)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vscalefpd))]
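For reference, the scalef family computes, per lane, `a` scaled by two to the power `floor(b)`. A minimal scalar model of one `vscalefpd` lane, ignoring the NaN/infinity/denormal special cases the hardware defines:

// Scalar sketch of one vscalefpd lane: a * 2^floor(b).
// Special-case handling (NaN, infinities, denormals) is omitted here.
fn scalef_lane(a: f64, b: f64) -> f64 {
    a * 2f64.powi(b.floor() as i32)
}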
@@ -5553,13 +5553,13 @@ pub unsafe fn _mm_maskz_scalef_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fixupimm_ps&expand=2499)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_ps&expand=2499)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm512_fixupimm_ps<const IMM8: i32>(a: __m512, b: __m512, c: __m512i) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x16();
let b = b.as_f32x16();
let c = c.as_i32x16();
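The mechanical change in this and the following hunks swaps the old width-specific macros (`static_assert_imm8!`, and later `static_assert_imm4!`/`static_assert_imm2!`) for the parameterized `static_assert_uimm_bits!`, which takes the bit width as a second argument. A rough model of the check it expresses, as a hypothetical const fn rather than stdarch's actual macro:

// Hypothetical helper mirroring the intent of `static_assert_uimm_bits!(IMM8, 8)`:
// the immediate must be a non-negative value representable in `bits` bits.
const fn fits_in_uimm_bits(imm: i32, bits: u32) -> bool {
    imm >= 0 && (imm as u64) < (1u64 << bits)
}
// fits_in_uimm_bits(114, 8) == true; fits_in_uimm_bits(256, 8) == false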
@@ -5569,7 +5569,7 @@ pub unsafe fn _mm512_fixupimm_ps<const IMM8: i32>(a: __m512, b: __m512, c: __m51
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fixupimm_ps&expand=2500)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_ps&expand=2500)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
@@ -5580,7 +5580,7 @@ pub unsafe fn _mm512_mask_fixupimm_ps<const IMM8: i32>(
b: __m512,
c: __m512i,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x16();
let b = b.as_f32x16();
let c = c.as_i32x16();
@@ -5590,7 +5590,7 @@ pub unsafe fn _mm512_mask_fixupimm_ps<const IMM8: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fixupimm_ps&expand=2501)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_ps&expand=2501)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
@@ -5601,7 +5601,7 @@ pub unsafe fn _mm512_maskz_fixupimm_ps<const IMM8: i32>(
b: __m512,
c: __m512i,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x16();
let b = b.as_f32x16();
let c = c.as_i32x16();
@@ -5611,13 +5611,13 @@ pub unsafe fn _mm512_maskz_fixupimm_ps<const IMM8: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fixupimm_ps&expand=2496)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fixupimm_ps&expand=2496)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm256_fixupimm_ps<const IMM8: i32>(a: __m256, b: __m256, c: __m256i) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let b = b.as_f32x8();
let c = c.as_i32x8();
@@ -5627,7 +5627,7 @@ pub unsafe fn _mm256_fixupimm_ps<const IMM8: i32>(a: __m256, b: __m256, c: __m25
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fixupimm_ps&expand=2497)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fixupimm_ps&expand=2497)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
@@ -5638,7 +5638,7 @@ pub unsafe fn _mm256_mask_fixupimm_ps<const IMM8: i32>(
b: __m256,
c: __m256i,
) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let b = b.as_f32x8();
let c = c.as_i32x8();
@@ -5648,7 +5648,7 @@ pub unsafe fn _mm256_mask_fixupimm_ps<const IMM8: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fixupimm_ps&expand=2498)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fixupimm_ps&expand=2498)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
@@ -5659,7 +5659,7 @@ pub unsafe fn _mm256_maskz_fixupimm_ps<const IMM8: i32>(
b: __m256,
c: __m256i,
) -> __m256 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let b = b.as_f32x8();
let c = c.as_i32x8();
@@ -5669,13 +5669,13 @@ pub unsafe fn _mm256_maskz_fixupimm_ps<const IMM8: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fixupimm_ps&expand=2493)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fixupimm_ps&expand=2493)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_fixupimm_ps<const IMM8: i32>(a: __m128, b: __m128, c: __m128i) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let c = c.as_i32x4();
@@ -5685,7 +5685,7 @@ pub unsafe fn _mm_fixupimm_ps<const IMM8: i32>(a: __m128, b: __m128, c: __m128i)
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fixupimm_ps&expand=2494)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fixupimm_ps&expand=2494)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
@@ -5696,7 +5696,7 @@ pub unsafe fn _mm_mask_fixupimm_ps<const IMM8: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let c = c.as_i32x4();
@@ -5706,7 +5706,7 @@ pub unsafe fn _mm_mask_fixupimm_ps<const IMM8: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fixupimm_ps&expand=2495)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fixupimm_ps&expand=2495)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0))]
@@ -5717,7 +5717,7 @@ pub unsafe fn _mm_maskz_fixupimm_ps<const IMM8: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let c = c.as_i32x4();
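A hedged usage sketch for the unmasked `_mm512_fixupimm_ps` above: the table in `c` encodes, per Intel's FIXUPIMM token encoding, which replacement value each input class maps to, and `IMM8 = 0` requests no additional exception reporting. The signature is as in the diff; the wrapper name is hypothetical.

// Sketch: calling the intrinsic with a caller-supplied fixup table.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn fixup_with_table(a: __m512, b: __m512, table: __m512i) -> __m512 {
    // IMM8 = 0: no extra exception flags requested.
    _mm512_fixupimm_ps::<0>(a, b, table)
}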
@@ -5727,13 +5727,13 @@ pub unsafe fn _mm_maskz_fixupimm_ps<const IMM8: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fixupimm_pd&expand=2490)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_pd&expand=2490)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm512_fixupimm_pd<const IMM8: i32>(a: __m512d, b: __m512d, c: __m512i) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x8();
let b = b.as_f64x8();
let c = c.as_i64x8();
@@ -5743,7 +5743,7 @@ pub unsafe fn _mm512_fixupimm_pd<const IMM8: i32>(a: __m512d, b: __m512d, c: __m
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fixupimm_pd&expand=2491)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_pd&expand=2491)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
@@ -5754,7 +5754,7 @@ pub unsafe fn _mm512_mask_fixupimm_pd<const IMM8: i32>(
b: __m512d,
c: __m512i,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x8();
let b = b.as_f64x8();
let c = c.as_i64x8();
@@ -5764,7 +5764,7 @@ pub unsafe fn _mm512_mask_fixupimm_pd<const IMM8: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fixupimm_pd&expand=2492)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_pd&expand=2492)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
@@ -5775,7 +5775,7 @@ pub unsafe fn _mm512_maskz_fixupimm_pd<const IMM8: i32>(
b: __m512d,
c: __m512i,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x8();
let b = b.as_f64x8();
let c = c.as_i64x8();
@@ -5785,13 +5785,13 @@ pub unsafe fn _mm512_maskz_fixupimm_pd<const IMM8: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fixupimm_pd&expand=2487)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fixupimm_pd&expand=2487)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm256_fixupimm_pd<const IMM8: i32>(a: __m256d, b: __m256d, c: __m256i) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x4();
let b = b.as_f64x4();
let c = c.as_i64x4();
@@ -5801,7 +5801,7 @@ pub unsafe fn _mm256_fixupimm_pd<const IMM8: i32>(a: __m256d, b: __m256d, c: __m
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fixupimm_pd&expand=2488)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_fixupimm_pd&expand=2488)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
@@ -5812,7 +5812,7 @@ pub unsafe fn _mm256_mask_fixupimm_pd<const IMM8: i32>(
b: __m256d,
c: __m256i,
) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x4();
let b = b.as_f64x4();
let c = c.as_i64x4();
@@ -5822,7 +5822,7 @@ pub unsafe fn _mm256_mask_fixupimm_pd<const IMM8: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fixupimm_pd&expand=2489)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_fixupimm_pd&expand=2489)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
@@ -5833,7 +5833,7 @@ pub unsafe fn _mm256_maskz_fixupimm_pd<const IMM8: i32>(
b: __m256d,
c: __m256i,
) -> __m256d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x4();
let b = b.as_f64x4();
let c = c.as_i64x4();
@@ -5843,13 +5843,13 @@ pub unsafe fn _mm256_maskz_fixupimm_pd<const IMM8: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fixupimm_pd&expand=2484)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fixupimm_pd&expand=2484)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_fixupimm_pd<const IMM8: i32>(a: __m128d, b: __m128d, c: __m128i) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let c = c.as_i64x2();
@@ -5859,7 +5859,7 @@ pub unsafe fn _mm_fixupimm_pd<const IMM8: i32>(a: __m128d, b: __m128d, c: __m128
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fixupimm_pd&expand=2485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_fixupimm_pd&expand=2485)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
@@ -5870,7 +5870,7 @@ pub unsafe fn _mm_mask_fixupimm_pd<const IMM8: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let c = c.as_i64x2();
@@ -5880,7 +5880,7 @@ pub unsafe fn _mm_mask_fixupimm_pd<const IMM8: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fixupimm_pd&expand=2486)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_fixupimm_pd&expand=2486)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0))]
@@ -5891,7 +5891,7 @@ pub unsafe fn _mm_maskz_fixupimm_pd<const IMM8: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let c = c.as_i64x2();
@@ -5901,7 +5901,7 @@ pub unsafe fn _mm_maskz_fixupimm_pd<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_ternarylogic_epi32&expand=5867)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ternarylogic_epi32&expand=5867)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -5911,7 +5911,7 @@ pub unsafe fn _mm512_ternarylogic_epi32<const IMM8: i32>(
b: __m512i,
c: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let b = b.as_i32x16();
let c = c.as_i32x16();
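The `vpternlogd`/`vpternlogq` immediate is an eight-entry truth table: at each bit position the three source bits form a 3-bit index (`a` high, `c` low), and the indexed bit of imm8 is written to the destination. A scalar sketch of one 32-bit lane, following the doc comment above:

// Scalar model of one vpternlogd lane.
fn ternlog_lane(a: u32, b: u32, c: u32, imm8: u8) -> u32 {
    let mut dst = 0u32;
    for i in 0..32 {
        // Bits of a, b, c select one of imm8's eight truth-table entries.
        let idx = ((a >> i) & 1) << 2 | ((b >> i) & 1) << 1 | ((c >> i) & 1);
        dst |= (((imm8 as u32) >> idx) & 1) << i;
    }
    dst
}
// e.g. imm8 = 0xCA yields the bitwise select: a ? b : c.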
@@ -5921,7 +5921,7 @@ pub unsafe fn _mm512_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from src, a, and b are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using writemask k at 32-bit granularity (32-bit elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_ternarylogic_epi32&expand=5865)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ternarylogic_epi32&expand=5865)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -5932,7 +5932,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let src = src.as_i32x16();
let a = a.as_i32x16();
let b = b.as_i32x16();
@@ -5942,7 +5942,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using zeromask k at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_ternarylogic_epi32&expand=5866)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ternarylogic_epi32&expand=5866)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -5953,7 +5953,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi32<const IMM8: i32>(
b: __m512i,
c: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let b = b.as_i32x16();
let c = c.as_i32x16();
@@ -5964,7 +5964,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ternarylogic_epi32&expand=5864)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ternarylogic_epi32&expand=5864)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -5974,7 +5974,7 @@ pub unsafe fn _mm256_ternarylogic_epi32<const IMM8: i32>(
b: __m256i,
c: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let b = b.as_i32x8();
let c = c.as_i32x8();
@@ -5984,7 +5984,7 @@ pub unsafe fn _mm256_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from src, a, and b are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using writemask k at 32-bit granularity (32-bit elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_ternarylogic_epi32&expand=5862)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ternarylogic_epi32&expand=5862)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -5995,7 +5995,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let src = src.as_i32x8();
let a = a.as_i32x8();
let b = b.as_i32x8();
@@ -6005,7 +6005,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using zeromask k at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_ternarylogic_epi32&expand=5863)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ternarylogic_epi32&expand=5863)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -6016,7 +6016,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi32<const IMM8: i32>(
b: __m256i,
c: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let b = b.as_i32x8();
let c = c.as_i32x8();
@@ -6027,7 +6027,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ternarylogic_epi32&expand=5861)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ternarylogic_epi32&expand=5861)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -6037,7 +6037,7 @@ pub unsafe fn _mm_ternarylogic_epi32<const IMM8: i32>(
b: __m128i,
c: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let b = b.as_i32x4();
let c = c.as_i32x4();
@@ -6047,7 +6047,7 @@ pub unsafe fn _mm_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from src, a, and b are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using writemask k at 32-bit granularity (32-bit elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_ternarylogic_epi32&expand=5859)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ternarylogic_epi32&expand=5859)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -6058,7 +6058,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let src = src.as_i32x4();
let a = a.as_i32x4();
let b = b.as_i32x4();
@@ -6068,7 +6068,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 32-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using zeromask k at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_ternarylogic_epi32&expand=5860)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ternarylogic_epi32&expand=5860)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogd, IMM8 = 114))]
@@ -6079,7 +6079,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi32<const IMM8: i32>(
b: __m128i,
c: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let b = b.as_i32x4();
let c = c.as_i32x4();
@@ -6090,7 +6090,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi32<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_ternarylogic_epi64&expand=5876)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ternarylogic_epi64&expand=5876)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6100,7 +6100,7 @@ pub unsafe fn _mm512_ternarylogic_epi64<const IMM8: i32>(
b: __m512i,
c: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let b = b.as_i64x8();
let c = c.as_i64x8();
@@ -6110,7 +6110,7 @@ pub unsafe fn _mm512_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from src, a, and b are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using writemask k at 64-bit granularity (64-bit elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_ternarylogic_epi64&expand=5874)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ternarylogic_epi64&expand=5874)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6121,7 +6121,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let src = src.as_i64x8();
let a = a.as_i64x8();
let b = b.as_i64x8();
@@ -6131,7 +6131,7 @@ pub unsafe fn _mm512_mask_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using zeromask k at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_ternarylogic_epi64&expand=5875)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ternarylogic_epi64&expand=5875)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6142,7 +6142,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi64<const IMM8: i32>(
b: __m512i,
c: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let b = b.as_i64x8();
let c = c.as_i64x8();
@@ -6153,7 +6153,7 @@ pub unsafe fn _mm512_maskz_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ternarylogic_epi64&expand=5873)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ternarylogic_epi64&expand=5873)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6163,7 +6163,7 @@ pub unsafe fn _mm256_ternarylogic_epi64<const IMM8: i32>(
b: __m256i,
c: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let b = b.as_i64x4();
let c = c.as_i64x4();
@@ -6173,7 +6173,7 @@ pub unsafe fn _mm256_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from src, a, and b are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using writemask k at 64-bit granularity (64-bit elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_ternarylogic_epi64&expand=5871)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ternarylogic_epi64&expand=5871)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6184,7 +6184,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let src = src.as_i64x4();
let a = a.as_i64x4();
let b = b.as_i64x4();
@@ -6194,7 +6194,7 @@ pub unsafe fn _mm256_mask_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using zeromask k at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_ternarylogic_epi64&expand=5872)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ternarylogic_epi64&expand=5872)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6205,7 +6205,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi64<const IMM8: i32>(
b: __m256i,
c: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let b = b.as_i64x4();
let c = c.as_i64x4();
@@ -6216,7 +6216,7 @@ pub unsafe fn _mm256_maskz_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ternarylogic_epi64&expand=5870)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ternarylogic_epi64&expand=5870)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6226,7 +6226,7 @@ pub unsafe fn _mm_ternarylogic_epi64<const IMM8: i32>(
b: __m128i,
c: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let b = b.as_i64x2();
let c = c.as_i64x2();
@@ -6236,7 +6236,7 @@ pub unsafe fn _mm_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from src, a, and b are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using writemask k at 64-bit granularity (64-bit elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_ternarylogic_epi64&expand=5868)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ternarylogic_epi64&expand=5868)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6247,7 +6247,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let src = src.as_i64x2();
let a = a.as_i64x2();
let b = b.as_i64x2();
@@ -6257,7 +6257,7 @@ pub unsafe fn _mm_mask_ternarylogic_epi64<const IMM8: i32>(
/// Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in imm8. For each bit in each packed 64-bit integer, the corresponding bit from a, b, and c are used to form a 3 bit index into imm8, and the value at that bit in imm8 is written to the corresponding bit in dst using zeromask k at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_ternarylogic_epi64&expand=5869)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ternarylogic_epi64&expand=5869)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpternlogq, IMM8 = 114))]
@@ -6268,7 +6268,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi64<const IMM8: i32>(
b: __m128i,
c: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let b = b.as_i64x2();
let c = c.as_i64x2();
@@ -6288,7 +6288,7 @@ pub unsafe fn _mm_maskz_ternarylogic_epi64<const IMM8: i32>(
/// _MM_MANT_SIGN_zero // sign = 0
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_ps&expand=2880)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_ps&expand=2880)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6299,8 +6299,8 @@ pub unsafe fn _mm512_getmant_ps<
>(
a: __m512,
) -> __m512 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
let r = vgetmantps(
@@ -6324,7 +6324,7 @@ pub unsafe fn _mm512_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_ps&expand=2881)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_ps&expand=2881)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6337,8 +6337,8 @@ pub unsafe fn _mm512_mask_getmant_ps<
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x16();
let src = src.as_f32x16();
let r = vgetmantps(a, SIGN << 2 | NORM, src, k, _MM_FROUND_CUR_DIRECTION);
@@ -6356,7 +6356,7 @@ pub unsafe fn _mm512_mask_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_ps&expand=2882)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_ps&expand=2882)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6368,8 +6368,8 @@ pub unsafe fn _mm512_maskz_getmant_ps<
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
let r = vgetmantps(a, SIGN << 2 | NORM, zero, k, _MM_FROUND_CUR_DIRECTION);
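In all the getmant bodies, the two const generics are packed into the single hardware immediate as `SIGN << 2 | NORM`: the normalization interval occupies bits 1:0 and the sign control bits 3:2. A small sketch, with the enum values assumed from Intel's guide:

// Packing of the getmant immediate (constant values assumed from Intel's guide):
// _MM_MANT_NORM_1_2 = 0 (mantissa in [1, 2)), _MM_MANT_SIGN_src = 0 (keep sign).
const NORM: i32 = 0; // _MM_MANT_NORM_1_2
const SIGN: i32 = 0; // _MM_MANT_SIGN_src
const IMM: i32 = SIGN << 2 | NORM; // == 0, the default shown in assert_instr above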
@@ -6387,7 +6387,7 @@ pub unsafe fn _mm512_maskz_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getmant_ps&expand=2877)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getmant_ps&expand=2877)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6398,8 +6398,8 @@ pub unsafe fn _mm256_getmant_ps<
>(
a: __m256,
) -> __m256 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x8();
let zero = _mm256_setzero_ps().as_f32x8();
let r = vgetmantps256(a, SIGN << 2 | NORM, zero, 0b11111111);
@@ -6417,7 +6417,7 @@ pub unsafe fn _mm256_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_ps&expand=2878)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getmant_ps&expand=2878)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6430,8 +6430,8 @@ pub unsafe fn _mm256_mask_getmant_ps<
k: __mmask8,
a: __m256,
) -> __m256 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x8();
let src = src.as_f32x8();
let r = vgetmantps256(a, SIGN << 2 | NORM, src, k);
@@ -6449,7 +6449,7 @@ pub unsafe fn _mm256_mask_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_ps&expand=2879)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getmant_ps&expand=2879)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6461,8 +6461,8 @@ pub unsafe fn _mm256_maskz_getmant_ps<
k: __mmask8,
a: __m256,
) -> __m256 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x8();
let zero = _mm256_setzero_ps().as_f32x8();
let r = vgetmantps256(a, SIGN << 2 | NORM, zero, k);
@@ -6480,7 +6480,7 @@ pub unsafe fn _mm256_maskz_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_ps&expand=2874)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getmant_ps&expand=2874)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6491,8 +6491,8 @@ pub unsafe fn _mm_getmant_ps<
>(
a: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
let r = vgetmantps128(a, SIGN << 2 | NORM, zero, 0b00001111);
@@ -6510,7 +6510,7 @@ pub unsafe fn _mm_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_ps&expand=2875)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getmant_ps&expand=2875)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6523,8 +6523,8 @@ pub unsafe fn _mm_mask_getmant_ps<
k: __mmask8,
a: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x4();
let src = src.as_f32x4();
let r = vgetmantps128(a, SIGN << 2 | NORM, src, k);
@@ -6542,7 +6542,7 @@ pub unsafe fn _mm_mask_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_ps&expand=2876)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getmant_ps&expand=2876)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0))]
@@ -6554,8 +6554,8 @@ pub unsafe fn _mm_maskz_getmant_ps<
k: __mmask8,
a: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
let r = vgetmantps128(a, SIGN << 2 | NORM, zero, k);
@@ -6573,7 +6573,7 @@ pub unsafe fn _mm_maskz_getmant_ps<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_pd&expand=2871)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_pd&expand=2871)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6584,8 +6584,8 @@ pub unsafe fn _mm512_getmant_pd<
>(
a: __m512d,
) -> __m512d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
let r = vgetmantpd(
@@ -6609,7 +6609,7 @@ pub unsafe fn _mm512_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_pd&expand=2872)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_pd&expand=2872)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6622,8 +6622,8 @@ pub unsafe fn _mm512_mask_getmant_pd<
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x8();
let src = src.as_f64x8();
let r = vgetmantpd(a, SIGN << 2 | NORM, src, k, _MM_FROUND_CUR_DIRECTION);
@@ -6641,7 +6641,7 @@ pub unsafe fn _mm512_mask_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_pd&expand=2873)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_pd&expand=2873)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6653,8 +6653,8 @@ pub unsafe fn _mm512_maskz_getmant_pd<
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
let r = vgetmantpd(a, SIGN << 2 | NORM, zero, k, _MM_FROUND_CUR_DIRECTION);
@@ -6672,7 +6672,7 @@ pub unsafe fn _mm512_maskz_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getmant_pd&expand=2868)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_getmant_pd&expand=2868)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6683,8 +6683,8 @@ pub unsafe fn _mm256_getmant_pd<
>(
a: __m256d,
) -> __m256d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x4();
let zero = _mm256_setzero_pd().as_f64x4();
let r = vgetmantpd256(a, SIGN << 2 | NORM, zero, 0b00001111);
@@ -6702,7 +6702,7 @@ pub unsafe fn _mm256_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_pd&expand=2869)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_getmant_pd&expand=2869)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6715,8 +6715,8 @@ pub unsafe fn _mm256_mask_getmant_pd<
k: __mmask8,
a: __m256d,
) -> __m256d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x4();
let src = src.as_f64x4();
let r = vgetmantpd256(a, SIGN << 2 | NORM, src, k);
@@ -6734,7 +6734,7 @@ pub unsafe fn _mm256_mask_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_pd&expand=2870)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_getmant_pd&expand=2870)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6746,8 +6746,8 @@ pub unsafe fn _mm256_maskz_getmant_pd<
k: __mmask8,
a: __m256d,
) -> __m256d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x4();
let zero = _mm256_setzero_pd().as_f64x4();
let r = vgetmantpd256(a, SIGN << 2 | NORM, zero, k);
@@ -6765,7 +6765,7 @@ pub unsafe fn _mm256_maskz_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_pd&expand=2865)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getmant_pd&expand=2865)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6776,8 +6776,8 @@ pub unsafe fn _mm_getmant_pd<
>(
a: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
let r = vgetmantpd128(a, SIGN << 2 | NORM, zero, 0b00000011);
@@ -6795,7 +6795,7 @@ pub unsafe fn _mm_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_pd&expand=2866)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_getmant_pd&expand=2866)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6808,8 +6808,8 @@ pub unsafe fn _mm_mask_getmant_pd<
k: __mmask8,
a: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x2();
let src = src.as_f64x2();
let r = vgetmantpd128(a, SIGN << 2 | NORM, src, k);
@@ -6827,7 +6827,7 @@ pub unsafe fn _mm_mask_getmant_pd<
/// _MM_MANT_SIGN_zero // sign = 0\
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_pd&expand=2867)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_getmant_pd&expand=2867)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0))]
@@ -6839,8 +6839,8 @@ pub unsafe fn _mm_maskz_getmant_pd<
k: __mmask8,
a: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
let r = vgetmantpd128(a, SIGN << 2 | NORM, zero, k);
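The `static_assert_imm4!`/`static_assert_imm2!` → `static_assert_uimm_bits!` rewrites in the hunks above replace a family of fixed-width assertion macros with a single macro that takes the bit width as an argument: `static_assert_uimm_bits!(NORM, 4)` rejects at compile time any `NORM` that does not fit in 4 unsigned bits, and the accepted values are then packed into the immediate as `SIGN << 2 | NORM`. A minimal sketch of an equivalent compile-time check in plain Rust — `UimmCheck` below is illustrative only, not stdarch's actual macro:

```rust
// Illustrative stand-in for static_assert_uimm_bits!(VAL, BITS): force a
// compile-time (post-monomorphization) error when a const generic does not
// fit in BITS unsigned bits.
struct UimmCheck<const VAL: i32, const BITS: u32>;

impl<const VAL: i32, const BITS: u32> UimmCheck<VAL, BITS> {
    const OK: () = assert!(
        VAL >= 0 && (VAL as u64) < (1u64 << BITS),
        "immediate out of range"
    );
}

fn getmant_like<const NORM: i32, const SIGN: i32>() -> i32 {
    // Same bounds as the patch: NORM must fit in 4 unsigned bits, SIGN in 2.
    let () = UimmCheck::<NORM, 4>::OK;
    let () = UimmCheck::<SIGN, 2>::OK;
    // The real intrinsics then combine the two immediates like this.
    SIGN << 2 | NORM
}

fn main() {
    assert_eq!(getmant_like::<1, 2>(), 9);
    // getmant_like::<16, 0>() would fail to compile: 16 needs 5 bits.
}
```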
@@ -6856,7 +6856,7 @@ pub unsafe fn _mm_maskz_getmant_pd<
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_round_ps&expand=145)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_round_ps&expand=145)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))]
@@ -6878,7 +6878,7 @@ pub unsafe fn _mm512_add_round_ps<const ROUNDING: i32>(a: __m512, b: __m512) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_round_ps&expand=146)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_round_ps&expand=146)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))]
@@ -6905,7 +6905,7 @@ pub unsafe fn _mm512_mask_add_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_round_ps&expand=147)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_round_ps&expand=147)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddps, ROUNDING = 8))]
@@ -6932,7 +6932,7 @@ pub unsafe fn _mm512_maskz_add_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_round_pd&expand=142)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_add_round_pd&expand=142)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))]
@@ -6954,7 +6954,7 @@ pub unsafe fn _mm512_add_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d) -
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_round_pd&expand=143)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_add_round_pd&expand=143)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))]
@@ -6981,7 +6981,7 @@ pub unsafe fn _mm512_mask_add_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_round_pd&expand=144)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_add_round_pd&expand=144)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddpd, ROUNDING = 8))]
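For the `_round` intrinsics above, the rounding mode travels in the `ROUNDING` const generic rather than in MXCSR. A usage sketch, assuming a nightly toolchain (these AVX512 intrinsics were unstable when this change landed, gated behind `stdsimd`) and an AVX512F-capable CPU at runtime:

```rust
#![feature(stdsimd)] // assumption: the nightly gate for AVX512 intrinsics

#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return; // skip on CPUs without AVX512F
    }
    unsafe {
        let a = _mm512_set1_ps(1.5);
        let b = _mm512_set1_ps(2.25);
        // ROUNDING = 8 in the assert_instr attributes is
        // _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC (0x00 | 0x08);
        // any listed (mode | _MM_FROUND_NO_EXC) pair, or
        // _MM_FROUND_CUR_DIRECTION alone, is accepted.
        let sum = _mm512_add_round_ps::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b);
        let mut out = [0.0f32; 16];
        _mm512_storeu_ps(out.as_mut_ptr(), sum);
        assert_eq!(out, [3.75; 16]); // exact, so the mode does not matter here
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```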
@@ -7008,7 +7008,7 @@ pub unsafe fn _mm512_maskz_add_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_round_ps&expand=5739)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_round_ps&expand=5739)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))]
@@ -7030,7 +7030,7 @@ pub unsafe fn _mm512_sub_round_ps<const ROUNDING: i32>(a: __m512, b: __m512) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_round_ps&expand=5737)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_round_ps&expand=5737)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))]
@@ -7057,7 +7057,7 @@ pub unsafe fn _mm512_mask_sub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_round_ps&expand=5738)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_round_ps&expand=5738)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubps, ROUNDING = 8))]
@@ -7084,7 +7084,7 @@ pub unsafe fn _mm512_maskz_sub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_round_pd&expand=5736)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sub_round_pd&expand=5736)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))]
@@ -7106,7 +7106,7 @@ pub unsafe fn _mm512_sub_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d) -
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_round_pd&expand=5734)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sub_round_pd&expand=5734)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))]
@@ -7133,7 +7133,7 @@ pub unsafe fn _mm512_mask_sub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_round_pd&expand=5735)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sub_round_pd&expand=5735)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubpd, ROUNDING = 8))]
@@ -7160,7 +7160,7 @@ pub unsafe fn _mm512_maskz_sub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_ps&expand=3940)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_round_ps&expand=3940)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))]
@@ -7182,7 +7182,7 @@ pub unsafe fn _mm512_mul_round_ps<const ROUNDING: i32>(a: __m512, b: __m512) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_ps&expand=3938)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_round_ps&expand=3938)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))]
@@ -7209,7 +7209,7 @@ pub unsafe fn _mm512_mask_mul_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_ps&expand=3939)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_round_ps&expand=3939)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulps, ROUNDING = 8))]
@@ -7236,7 +7236,7 @@ pub unsafe fn _mm512_maskz_mul_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_pd&expand=3937)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mul_round_pd&expand=3937)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))]
@@ -7258,7 +7258,7 @@ pub unsafe fn _mm512_mul_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d) -
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_pd&expand=3935)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_mul_round_pd&expand=3935)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))]
@@ -7285,7 +7285,7 @@ pub unsafe fn _mm512_mask_mul_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_ps&expand=3939)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_mul_round_pd&expand=3936)
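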
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulpd, ROUNDING = 8))]
@@ -7312,7 +7312,7 @@ pub unsafe fn _mm512_maskz_mul_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_round_ps&expand=2168)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_round_ps&expand=2168)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))]
@@ -7334,7 +7334,7 @@ pub unsafe fn _mm512_div_round_ps<const ROUNDING: i32>(a: __m512, b: __m512) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_round_ps&expand=2169)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_round_ps&expand=2169)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))]
@@ -7361,7 +7361,7 @@ pub unsafe fn _mm512_mask_div_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_round_ps&expand=2170)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_round_ps&expand=2170)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivps, ROUNDING = 8))]
@@ -7388,7 +7388,7 @@ pub unsafe fn _mm512_maskz_div_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_round_pd&expand=2165)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_div_round_pd&expand=2165)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))]
@@ -7410,7 +7410,7 @@ pub unsafe fn _mm512_div_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d) -
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_round_pd&expand=2166)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_div_round_pd&expand=2166)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))]
@@ -7437,7 +7437,7 @@ pub unsafe fn _mm512_mask_div_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_round_pd&expand=2167)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_div_round_pd&expand=2167)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivpd, ROUNDING = 8))]
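The `mask_`/`maskz_` naming convention repeated through these doc comments follows one rule: where bit `i` of `k` is clear, a writemask (`mask_`) variant keeps the lane from `src`, while a zeromask (`maskz_`) variant writes zero. A scalar model of the convention, sketch only, using eight `f64` lanes as in the `_pd` intrinsics:

```rust
// Scalar model of the writemask/zeromask convention described in the
// doc comments above; not the actual intrinsic implementation.
fn mask_op(src: &[f64; 8], k: u8, a: &[f64; 8], b: &[f64; 8], op: fn(f64, f64) -> f64) -> [f64; 8] {
    let mut dst = *src; // unselected lanes keep src
    for i in 0..8 {
        if k & (1 << i) != 0 {
            dst[i] = op(a[i], b[i]);
        }
    }
    dst
}

fn maskz_op(k: u8, a: &[f64; 8], b: &[f64; 8], op: fn(f64, f64) -> f64) -> [f64; 8] {
    let mut dst = [0.0; 8]; // unselected lanes become zero
    for i in 0..8 {
        if k & (1 << i) != 0 {
            dst[i] = op(a[i], b[i]);
        }
    }
    dst
}

fn main() {
    let (a, b, src) = ([2.0; 8], [4.0; 8], [9.0; 8]);
    // 0b101: only lanes 0 and 2 are computed.
    assert_eq!(mask_op(&src, 0b101, &a, &b, |x, y| x / y)[1], 9.0);
    assert_eq!(maskz_op(0b101, &a, &b, |x, y| x / y)[1], 0.0);
}
```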
@@ -7464,7 +7464,7 @@ pub unsafe fn _mm512_maskz_div_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_round_ps&expand=5377)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_round_ps&expand=5377)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))]
@@ -7485,7 +7485,7 @@ pub unsafe fn _mm512_sqrt_round_ps<const ROUNDING: i32>(a: __m512) -> __m512 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_ps&expand=5375)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_round_ps&expand=5375)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))]
@@ -7510,7 +7510,7 @@ pub unsafe fn _mm512_mask_sqrt_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_round_ps&expand=5376)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_round_ps&expand=5376)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtps, ROUNDING = 8))]
@@ -7532,7 +7532,7 @@ pub unsafe fn _mm512_maskz_sqrt_round_ps<const ROUNDING: i32>(k: __mmask16, a: _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_round_pd&expand=5374)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sqrt_round_pd&expand=5374)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))]
@@ -7553,7 +7553,7 @@ pub unsafe fn _mm512_sqrt_round_pd<const ROUNDING: i32>(a: __m512d) -> __m512d {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_pd&expand=5372)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sqrt_round_pd&expand=5372)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))]
@@ -7578,7 +7578,7 @@ pub unsafe fn _mm512_mask_sqrt_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_round_pd&expand=5373)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sqrt_round_pd&expand=5373)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtpd, ROUNDING = 8))]
@@ -7600,7 +7600,7 @@ pub unsafe fn _mm512_maskz_sqrt_round_pd<const ROUNDING: i32>(k: __mmask8, a: __
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_ps&expand=2565)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_round_ps&expand=2565)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -7627,7 +7627,7 @@ pub unsafe fn _mm512_fmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_ps&expand=2566)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_round_ps&expand=2566)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -7655,7 +7655,7 @@ pub unsafe fn _mm512_mask_fmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_ps&expand=2568)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_round_ps&expand=2568)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -7684,7 +7684,7 @@ pub unsafe fn _mm512_maskz_fmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_round_ps&expand=2567)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_round_ps&expand=2567)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132ps or vfmadd213ps or vfmadd231ps
@@ -7712,7 +7712,7 @@ pub unsafe fn _mm512_mask3_fmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_pd&expand=2561)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmadd_round_pd&expand=2561)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -7739,7 +7739,7 @@ pub unsafe fn _mm512_fmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_pd&expand=2562)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmadd_round_pd&expand=2562)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -7767,7 +7767,7 @@ pub unsafe fn _mm512_mask_fmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_pd&expand=2564)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmadd_round_pd&expand=2564)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
@@ -7796,7 +7796,7 @@ pub unsafe fn _mm512_maskz_fmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_round_pd&expand=2563)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmadd_round_pd&expand=2563)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmadd132pd or vfmadd213pd or vfmadd231pd
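The four FMA flavours above differ only in which operand feeds the unselected lanes: `mask_fmadd` keeps `a`, `mask3_fmadd` keeps `c`, `maskz_fmadd` zeroes them, and the unmasked form computes every lane. A scalar sketch of those conventions:

```rust
// Scalar model of the masked FMA flavours; selected lanes all compute
// a*b + c, unselected lanes differ per variant.
fn fma_lane(a: f64, b: f64, c: f64) -> f64 {
    a.mul_add(b, c)
}

fn mask_fmadd(a: [f64; 8], k: u8, b: [f64; 8], c: [f64; 8]) -> [f64; 8] {
    core::array::from_fn(|i| if k & (1 << i) != 0 { fma_lane(a[i], b[i], c[i]) } else { a[i] })
}

fn mask3_fmadd(a: [f64; 8], b: [f64; 8], c: [f64; 8], k: u8) -> [f64; 8] {
    core::array::from_fn(|i| if k & (1 << i) != 0 { fma_lane(a[i], b[i], c[i]) } else { c[i] })
}

fn maskz_fmadd(k: u8, a: [f64; 8], b: [f64; 8], c: [f64; 8]) -> [f64; 8] {
    core::array::from_fn(|i| if k & (1 << i) != 0 { fma_lane(a[i], b[i], c[i]) } else { 0.0 })
}

fn main() {
    let (a, b, c) = ([2.0; 8], [3.0; 8], [10.0; 8]);
    let k = 0b0000_0001;
    assert_eq!(mask_fmadd(a, k, b, c)[0], 16.0); // selected: 2*3 + 10
    assert_eq!(mask_fmadd(a, k, b, c)[1], 2.0);  // unselected: a
    assert_eq!(mask3_fmadd(a, b, c, k)[1], 10.0); // unselected: c
    assert_eq!(maskz_fmadd(k, a, b, c)[1], 0.0);  // unselected: zero
}
```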
@@ -7824,7 +7824,7 @@ pub unsafe fn _mm512_mask3_fmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_round_ps&expand=2651)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_round_ps&expand=2651)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -7852,7 +7852,7 @@ pub unsafe fn _mm512_fmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_round_ps&expand=2652)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_round_ps&expand=2652)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -7881,7 +7881,7 @@ pub unsafe fn _mm512_mask_fmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_round_ps&expand=2654)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_round_ps&expand=2654)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -7910,7 +7910,7 @@ pub unsafe fn _mm512_maskz_fmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_round_ps&expand=2653)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_round_ps&expand=2653)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132ps or vfmsub213ps or vfmsub231ps, clang generates vfmadd, gcc generates vfmsub
@@ -7940,7 +7940,7 @@ pub unsafe fn _mm512_mask3_fmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_round_pd&expand=2647)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsub_round_pd&expand=2647)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
@@ -7968,7 +7968,7 @@ pub unsafe fn _mm512_fmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_round_pd&expand=2648)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsub_round_pd&expand=2648)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
@@ -7997,7 +7997,7 @@ pub unsafe fn _mm512_mask_fmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_round_pd&expand=2650)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsub_round_pd&expand=2650)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
@@ -8026,7 +8026,7 @@ pub unsafe fn _mm512_maskz_fmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_round_pd&expand=2649)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsub_round_pd&expand=2649)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfmsub132pd or vfmsub213pd or vfmsub231pd. clang generates fmadd, gcc generates fmsub
@@ -8056,7 +8056,7 @@ pub unsafe fn _mm512_mask3_fmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_round_ps&expand=2619)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_round_ps&expand=2619)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -8083,7 +8083,7 @@ pub unsafe fn _mm512_fmaddsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_round_ps&expand=2620)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_round_ps&expand=2620)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -8111,7 +8111,7 @@ pub unsafe fn _mm512_mask_fmaddsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_round_ps&expand=2622)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_round_ps&expand=2622)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -8140,7 +8140,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_round_ps&expand=2621)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_round_ps&expand=2621)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132ps or vfmaddsub213ps or vfmaddsub231ps
@@ -8168,7 +8168,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_round_pd&expand=2615)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmaddsub_round_pd&expand=2615)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -8195,7 +8195,7 @@ pub unsafe fn _mm512_fmaddsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_round_pd&expand=2616)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmaddsub_round_pd&expand=2616)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -8223,7 +8223,7 @@ pub unsafe fn _mm512_mask_fmaddsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_round_pd&expand=2618)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmaddsub_round_pd&expand=2618)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -8252,7 +8252,7 @@ pub unsafe fn _mm512_maskz_fmaddsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_round_pd&expand=2617)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmaddsub_round_pd&expand=2617)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmaddsub132pd or vfmaddsub213pd or vfmaddsub231pd
@@ -8280,7 +8280,7 @@ pub unsafe fn _mm512_mask3_fmaddsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_round_ps&expand=2699)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_round_ps&expand=2699)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -8308,7 +8308,7 @@ pub unsafe fn _mm512_fmsubadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_round_ps&expand=2700)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_round_ps&expand=2700)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -8337,7 +8337,7 @@ pub unsafe fn _mm512_mask_fmsubadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_round_ps&expand=2702)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_round_ps&expand=2702)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -8366,7 +8366,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_round_ps&expand=2701)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_round_ps&expand=2701)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132ps or vfmsubadd213ps or vfmsubadd231ps
@@ -8396,7 +8396,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_round_pd&expand=2695)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fmsubadd_round_pd&expand=2695)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -8424,7 +8424,7 @@ pub unsafe fn _mm512_fmsubadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_round_pd&expand=2696)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fmsubadd_round_pd&expand=2696)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -8453,7 +8453,7 @@ pub unsafe fn _mm512_mask_fmsubadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_round_pd&expand=2698)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fmsubadd_round_pd&expand=2698)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
@@ -8482,7 +8482,7 @@ pub unsafe fn _mm512_maskz_fmsubadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_round_pd&expand=2697)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fmsubadd_round_pd&expand=2697)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))] //vfmsubadd132pd or vfmsubadd213pd or vfmsubadd231pd
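`fmaddsub` and `fmsubadd` alternate the sign of `c` by lane parity: `fmaddsub` subtracts `c` in even lanes and adds it in odd lanes, and `fmsubadd` does the reverse. A scalar sketch:

```rust
// Scalar model of the alternating add/subtract FMA variants above.
fn fmaddsub(a: &[f64], b: &[f64], c: &[f64]) -> Vec<f64> {
    (0..a.len())
        .map(|i| if i % 2 == 0 { a[i] * b[i] - c[i] } else { a[i] * b[i] + c[i] })
        .collect()
}

fn fmsubadd(a: &[f64], b: &[f64], c: &[f64]) -> Vec<f64> {
    (0..a.len())
        .map(|i| if i % 2 == 0 { a[i] * b[i] + c[i] } else { a[i] * b[i] - c[i] })
        .collect()
}

fn main() {
    let a = [1.0, 1.0, 1.0, 1.0];
    let b = [2.0, 2.0, 2.0, 2.0];
    let c = [0.5, 0.5, 0.5, 0.5];
    assert_eq!(fmaddsub(&a, &b, &c), vec![1.5, 2.5, 1.5, 2.5]);
    assert_eq!(fmsubadd(&a, &b, &c), vec![2.5, 1.5, 2.5, 1.5]);
}
```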
@@ -8512,7 +8512,7 @@ pub unsafe fn _mm512_mask3_fmsubadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_round_ps&expand=2731)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_round_ps&expand=2731)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -8540,7 +8540,7 @@ pub unsafe fn _mm512_fnmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_round_ps&expand=2732)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_round_ps&expand=2732)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -8569,7 +8569,7 @@ pub unsafe fn _mm512_mask_fnmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_round_ps&expand=2734)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_round_ps&expand=2734)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -8598,7 +8598,7 @@ pub unsafe fn _mm512_maskz_fnmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_round_ps&expand=2733)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_round_ps&expand=2733)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132ps or vfnmadd213ps or vfnmadd231ps
@@ -8627,7 +8627,7 @@ pub unsafe fn _mm512_mask3_fnmadd_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_pd&expand=2711)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmadd_round_pd&expand=2727)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -8655,7 +8655,7 @@ pub unsafe fn _mm512_fnmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_round_pd&expand=2728)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmadd_round_pd&expand=2728)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -8685,7 +8685,7 @@ pub unsafe fn _mm512_mask_fnmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_round_pd&expand=2730)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmadd_round_pd&expand=2730)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -8714,7 +8714,7 @@ pub unsafe fn _mm512_maskz_fnmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_round_pd&expand=2729)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmadd_round_pd&expand=2729)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmadd132pd or vfnmadd213pd or vfnmadd231pd
@@ -8743,7 +8743,7 @@ pub unsafe fn _mm512_mask3_fnmadd_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_round_ps&expand=2779)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_round_ps&expand=2779)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -8771,7 +8771,7 @@ pub unsafe fn _mm512_fnmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_round_ps&expand=2780)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_round_ps&expand=2780)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -8801,7 +8801,7 @@ pub unsafe fn _mm512_mask_fnmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_round_ps&expand=2782)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_round_ps&expand=2782)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -8830,7 +8830,7 @@ pub unsafe fn _mm512_maskz_fnmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_round_ps&expand=2781)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_round_ps&expand=2781)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132ps or vfnmsub213ps or vfnmsub231ps
@@ -8860,7 +8860,7 @@ pub unsafe fn _mm512_mask3_fnmsub_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_round_pd&expand=2775)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fnmsub_round_pd&expand=2775)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -8888,7 +8888,7 @@ pub unsafe fn _mm512_fnmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_round_pd&expand=2776)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fnmsub_round_pd&expand=2776)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -8918,7 +8918,7 @@ pub unsafe fn _mm512_mask_fnmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_round_pd&expand=2778)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fnmsub_round_pd&expand=2778)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
@@ -8947,7 +8947,7 @@ pub unsafe fn _mm512_maskz_fnmsub_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_round_pd&expand=2777)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask3_fnmsub_round_pd&expand=2777)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))] //vfnmsub132pd or vfnmsub213pd or vfnmsub231pd
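In the `fnmadd`/`fnmsub` families above, the `n` negates the intermediate product, so `fnmadd` computes `-(a*b) + c` and `fnmsub` computes `-(a*b) - c` per lane. A scalar sketch:

```rust
// Scalar model of the negated-multiply FMA variants: the product is
// negated before the final add or subtract.
fn fnmadd(a: f64, b: f64, c: f64) -> f64 { -(a * b) + c }
fn fnmsub(a: f64, b: f64, c: f64) -> f64 { -(a * b) - c }

fn main() {
    assert_eq!(fnmadd(2.0, 3.0, 10.0), 4.0);   // -(6) + 10
    assert_eq!(fnmsub(2.0, 3.0, 10.0), -16.0); // -(6) - 10
}
```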
@@ -8971,7 +8971,7 @@ pub unsafe fn _mm512_mask3_fnmsub_round_pd<const ROUNDING: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_round_ps&expand=3662)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_round_ps&expand=3662)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxps, SAE = 8))]
@@ -8987,7 +8987,7 @@ pub unsafe fn _mm512_max_round_ps<const SAE: i32>(a: __m512, b: __m512) -> __m51
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_round_ps&expand=3660)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_round_ps&expand=3660)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxps, SAE = 8))]
@@ -9008,7 +9008,7 @@ pub unsafe fn _mm512_mask_max_round_ps<const SAE: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_round_ps&expand=3661)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_round_ps&expand=3661)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxps, SAE = 8))]
@@ -9029,7 +9029,7 @@ pub unsafe fn _mm512_maskz_max_round_ps<const SAE: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_round_pd&expand=3659)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_max_round_pd&expand=3659)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))]
@@ -9045,7 +9045,7 @@ pub unsafe fn _mm512_max_round_pd<const SAE: i32>(a: __m512d, b: __m512d) -> __m
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_round_pd&expand=3657)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_max_round_pd&expand=3657)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))]
@@ -9066,7 +9066,7 @@ pub unsafe fn _mm512_mask_max_round_pd<const SAE: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed maximum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_round_pd&expand=3658)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_max_round_pd&expand=3658)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxpd, SAE = 8))]
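`max`/`min` never round, so instead of a `ROUNDING` parameter they expose only exception suppression through `SAE`: `SAE = 8` in the attributes is `_MM_FROUND_NO_EXC`, and `_MM_FROUND_CUR_DIRECTION` is the other accepted value. A usage sketch under the same nightly/AVX512F assumptions as earlier:

```rust
#![feature(stdsimd)] // assumption: the nightly gate for AVX512 intrinsics

#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return; // skip on CPUs without AVX512F
    }
    unsafe {
        let a = _mm512_set1_pd(1.0);
        let b = _mm512_set1_pd(2.0);
        // _MM_FROUND_NO_EXC suppresses all FP exceptions for this one
        // instruction; the comparison itself is exact.
        let m = _mm512_max_round_pd::<{ _MM_FROUND_NO_EXC }>(a, b);
        let mut out = [0.0f64; 8];
        _mm512_storeu_pd(out.as_mut_ptr(), m);
        assert_eq!(out, [2.0; 8]);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```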
@@ -9087,7 +9087,7 @@ pub unsafe fn _mm512_maskz_max_round_pd<const SAE: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_round_ps&expand=3776)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_round_ps&expand=3776)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminps, SAE = 8))]
@@ -9103,7 +9103,7 @@ pub unsafe fn _mm512_min_round_ps<const SAE: i32>(a: __m512, b: __m512) -> __m51
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_round_ps&expand=3774)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_round_ps&expand=3774)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminps, SAE = 8))]
@@ -9124,7 +9124,7 @@ pub unsafe fn _mm512_mask_min_round_ps<const SAE: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_round_ps&expand=3775)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_round_ps&expand=3775)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminps, SAE = 8))]
@@ -9145,7 +9145,7 @@ pub unsafe fn _mm512_maskz_min_round_ps<const SAE: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_round_pd&expand=3773)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_min_round_pd&expand=3773)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminpd, SAE = 8))]
@@ -9161,7 +9161,7 @@ pub unsafe fn _mm512_min_round_pd<const SAE: i32>(a: __m512d, b: __m512d) -> __m
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_round_pd&expand=3771)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_min_round_pd&expand=3771)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminpd, SAE = 8))]
@@ -9182,7 +9182,7 @@ pub unsafe fn _mm512_mask_min_round_pd<const SAE: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b, and store packed minimum values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_round_pd&expand=3772)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_min_round_pd&expand=3772)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminpd, SAE = 8))]
@@ -9203,7 +9203,7 @@ pub unsafe fn _mm512_maskz_min_round_pd<const SAE: i32>(
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_round_ps&expand=2850)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_round_ps&expand=2850)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))]
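The getexp family is described above as "essentially floor(log2(x))". A scalar sketch of that claim for normal, finite inputs (the special cases vgetexpps defines for zero, NaN, and infinities are omitted):

```rust
// Scalar model of one vgetexpps lane on a normal, finite input:
// the unbiased binary exponent, returned as a float.
// Zero/NaN/infinity handling is intentionally omitted in this sketch.
fn getexp_scalar(x: f32) -> f32 {
    x.abs().log2().floor()
}

fn main() {
    assert_eq!(getexp_scalar(8.0), 3.0);   // 8 = 1.0 * 2^3
    assert_eq!(getexp_scalar(0.75), -1.0); // 0.75 = 1.5 * 2^-1
}
```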
@@ -9219,7 +9219,7 @@ pub unsafe fn _mm512_getexp_round_ps<const SAE: i32>(a: __m512) -> __m512 {
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_round_ps&expand=2851)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_round_ps&expand=2851)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))]
@@ -9239,7 +9239,7 @@ pub unsafe fn _mm512_mask_getexp_round_ps<const SAE: i32>(
/// Convert the exponent of each packed single-precision (32-bit) floating-point element in a to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_round_ps&expand=2852)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_round_ps&expand=2852)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpps, SAE = 8))]
@@ -9255,7 +9255,7 @@ pub unsafe fn _mm512_maskz_getexp_round_ps<const SAE: i32>(k: __mmask16, a: __m5
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst. This intrinsic essentially calculates floor(log2(x)) for each element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_round_pd&expand=2847)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getexp_round_pd&expand=2847)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))]
@@ -9271,7 +9271,7 @@ pub unsafe fn _mm512_getexp_round_pd<const SAE: i32>(a: __m512d) -> __m512d {
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_round_pd&expand=2848)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getexp_round_pd&expand=2848)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))]
@@ -9291,7 +9291,7 @@ pub unsafe fn _mm512_mask_getexp_round_pd<const SAE: i32>(
/// Convert the exponent of each packed double-precision (64-bit) floating-point element in a to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates floor(log2(x)) for each element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_round_pd&expand=2849)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getexp_round_pd&expand=2849)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexppd, SAE = 8))]
@@ -9313,13 +9313,13 @@ pub unsafe fn _mm512_maskz_getexp_round_pd<const SAE: i32>(k: __mmask8, a: __m51
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_round_ps&expand=4790)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_round_ps&expand=4790)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(1, 2)]
pub unsafe fn _mm512_roundscale_round_ps<const IMM8: i32, const SAE: i32>(a: __m512) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
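The mechanical `static_assert_imm8!(IMM8)` → `static_assert_uimm_bits!(IMM8, 8)` rename running through the rest of this diff replaces a family of fixed-width assertion macros (`_imm2!`, `_imm4!`, `_imm8!`) with a single macro parameterized by bit width. A standalone sketch of such a compile-time range check, assuming an illustrative implementation (the real macro is internal to stdarch and may differ):

```rust
// Sketch of a width-parameterized immediate check: compilation fails
// when the value does not fit in the requested number of unsigned bits.
// Illustrative only; not the actual stdarch implementation.
macro_rules! static_assert_uimm_bits {
    ($imm:expr, $bits:expr) => {
        const _: () = assert!(
            ($imm as u64) < (1u64 << $bits),
            "immediate does not fit in the requested number of bits"
        );
    };
}

const IMM8: i32 = 0x42;
static_assert_uimm_bits!(IMM8, 8); // ok: 0x42 < 2^8
static_assert_uimm_bits!(3, 2);    // ok: covers the old static_assert_imm2! case
// static_assert_uimm_bits!(256, 8); // would fail to compile

fn main() {}
```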
@@ -9336,7 +9336,7 @@ pub unsafe fn _mm512_roundscale_round_ps<const IMM8: i32, const SAE: i32>(a: __m
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_round_ps&expand=4788)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_round_ps&expand=4788)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))]
@@ -9346,7 +9346,7 @@ pub unsafe fn _mm512_mask_roundscale_round_ps<const IMM8: i32, const SAE: i32>(
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let src = src.as_f32x16();
@@ -9363,7 +9363,7 @@ pub unsafe fn _mm512_mask_roundscale_round_ps<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_round_ps&expand=4789)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_round_ps&expand=4789)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaleps, IMM8 = 0, SAE = 8))]
@@ -9372,7 +9372,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_ps<const IMM8: i32, const SAE: i32>(
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
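Roundscale's IMM8 encodes two things: the rounding mode in its low bits and, in bits 7:4, the number M of fractional bits to keep, so the result is 2^-M * round(x * 2^M). A scalar sketch of the nearest-even case, with M passed as an ordinary parameter for clarity:

```rust
// Scalar model of one vrndscaleps lane with rounding mode = nearest-even:
// round x to the nearest multiple of 2^-M. Illustration only; the intrinsic
// packs M into bits 7:4 of IMM8 rather than taking it as an argument.
fn roundscale_nearest(x: f32, m: u32) -> f32 {
    let scale = (1u64 << m) as f32;
    (x * scale).round_ties_even() / scale
}

fn main() {
    assert_eq!(roundscale_nearest(1.3, 2), 1.25); // nearest multiple of 0.25
    assert_eq!(roundscale_nearest(2.5, 0), 2.0);  // ties round to even
}
```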
@@ -9389,13 +9389,13 @@ pub unsafe fn _mm512_maskz_roundscale_round_ps<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_round_pd&expand=4787)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_roundscale_round_pd&expand=4787)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))]
#[rustc_legacy_const_generics(1, 2)]
pub unsafe fn _mm512_roundscale_round_pd<const IMM8: i32, const SAE: i32>(a: __m512d) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
@@ -9412,7 +9412,7 @@ pub unsafe fn _mm512_roundscale_round_pd<const IMM8: i32, const SAE: i32>(a: __m
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_round_pd&expand=4785)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_roundscale_round_pd&expand=4785)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))]
@@ -9422,7 +9422,7 @@ pub unsafe fn _mm512_mask_roundscale_round_pd<const IMM8: i32, const SAE: i32>(
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let src = src.as_f64x8();
@@ -9439,7 +9439,7 @@ pub unsafe fn _mm512_mask_roundscale_round_pd<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_round_pd&expand=4786)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_roundscale_round_pd&expand=4786)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalepd, IMM8 = 0, SAE = 8))]
@@ -9448,7 +9448,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_pd<const IMM8: i32, const SAE: i32>(
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
@@ -9465,7 +9465,7 @@ pub unsafe fn _mm512_maskz_roundscale_round_pd<const IMM8: i32, const SAE: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_round_ps&expand=4889)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_round_ps&expand=4889)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))]
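scalef scales each lane of a by a power of two taken from b: dst = a * 2^floor(b). A scalar sketch, ignoring the special cases (NaN, infinities, denormals) the instruction defines:

```rust
// Scalar model of one vscalefps lane: multiply `a` by 2^floor(b).
// Special-case handling is omitted in this sketch.
fn scalef_scalar(a: f32, b: f32) -> f32 {
    a * 2f32.powi(b.floor() as i32)
}

fn main() {
    assert_eq!(scalef_scalar(3.0, 2.0), 12.0);
    assert_eq!(scalef_scalar(3.0, 2.9), 12.0); // only floor(b) matters
    assert_eq!(scalef_scalar(1.0, -1.0), 0.5);
}
```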
@@ -9488,7 +9488,7 @@ pub unsafe fn _mm512_scalef_round_ps<const ROUNDING: i32>(a: __m512, b: __m512)
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ps&expand=4887)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_round_ps&expand=4887)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))]
@@ -9516,7 +9516,7 @@ pub unsafe fn _mm512_mask_scalef_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_round_ps&expand=4888)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_round_ps&expand=4888)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefps, ROUNDING = 8))]
@@ -9543,7 +9543,7 @@ pub unsafe fn _mm512_maskz_scalef_round_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_round_pd&expand=4886)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_scalef_round_pd&expand=4886)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))]
@@ -9566,7 +9566,7 @@ pub unsafe fn _mm512_scalef_round_pd<const ROUNDING: i32>(a: __m512d, b: __m512d
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_pd&expand=4884)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_scalef_round_pd&expand=4884)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))]
@@ -9594,7 +9594,7 @@ pub unsafe fn _mm512_mask_scalef_round_pd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_round_pd&expand=4885)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_scalef_round_pd&expand=4885)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefpd, ROUNDING = 8))]
@@ -9615,7 +9615,7 @@ pub unsafe fn _mm512_maskz_scalef_round_pd<const ROUNDING: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fixupimm_round_ps&expand=2505)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_round_ps&expand=2505)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))]
@@ -9625,7 +9625,7 @@ pub unsafe fn _mm512_fixupimm_round_ps<const IMM8: i32, const SAE: i32>(
b: __m512,
c: __m512i,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let b = b.as_f32x16();
@@ -9637,7 +9637,7 @@ pub unsafe fn _mm512_fixupimm_round_ps<const IMM8: i32, const SAE: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fixupimm_round_ps&expand=2506)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_round_ps&expand=2506)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))]
@@ -9648,7 +9648,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_ps<const IMM8: i32, const SAE: i32>(
b: __m512,
c: __m512i,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let b = b.as_f32x16();
@@ -9660,7 +9660,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_ps<const IMM8: i32, const SAE: i32>(
/// Fix up packed single-precision (32-bit) floating-point elements in a and b using packed 32-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fixupimm_round_ps&expand=2507)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_round_ps&expand=2507)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmps, IMM8 = 0, SAE = 8))]
@@ -9671,7 +9671,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_ps<const IMM8: i32, const SAE: i32>(
b: __m512,
c: __m512i,
) -> __m512 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let b = b.as_f32x16();
@@ -9683,7 +9683,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_ps<const IMM8: i32, const SAE: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst. imm8 is used to set the required flags reporting.\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fixupimm_round_pd&expand=2502)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_fixupimm_round_pd&expand=2502)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))]
@@ -9693,7 +9693,7 @@ pub unsafe fn _mm512_fixupimm_round_pd<const IMM8: i32, const SAE: i32>(
b: __m512d,
c: __m512i,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let b = b.as_f64x8();
@@ -9705,7 +9705,7 @@ pub unsafe fn _mm512_fixupimm_round_pd<const IMM8: i32, const SAE: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fixupimm_round_pd&expand=2503)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_fixupimm_round_pd&expand=2503)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))]
@@ -9716,7 +9716,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_pd<const IMM8: i32, const SAE: i32>(
b: __m512d,
c: __m512i,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let b = b.as_f64x8();
@@ -9728,7 +9728,7 @@ pub unsafe fn _mm512_mask_fixupimm_round_pd<const IMM8: i32, const SAE: i32>(
/// Fix up packed double-precision (64-bit) floating-point elements in a and b using packed 64-bit integers in c, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). imm8 is used to set the required flags reporting.\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fixupimm_round_pd&expand=2504)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_fixupimm_round_pd&expand=2504)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmpd, IMM8 = 0, SAE = 8))]
@@ -9739,7 +9739,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_pd<const IMM8: i32, const SAE: i32>(
b: __m512d,
c: __m512i,
) -> __m512d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let b = b.as_f64x8();
@@ -9760,7 +9760,7 @@ pub unsafe fn _mm512_maskz_fixupimm_round_pd<const IMM8: i32, const SAE: i32>(
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_round_ps&expand=2886)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_round_ps&expand=2886)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))]
@@ -9772,8 +9772,8 @@ pub unsafe fn _mm512_getmant_round_ps<
>(
a: __m512,
) -> __m512 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
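getmant strips the exponent, leaving a mantissa in the interval chosen by NORM (the listing above shows the [1, 2) default) with the sign chosen by SIGN. A scalar sketch of the _MM_MANT_NORM_1_2 / sign-from-source combination for finite, non-zero inputs:

```rust
// Scalar model of one vgetmantps lane with interval [1, 2) and the
// source's sign. Zero, NaN and infinity handling is omitted here.
fn getmant_1_2(x: f32) -> f32 {
    let exp = x.abs().log2().floor(); // unbiased exponent, as in getexp
    x / 2f32.powi(exp as i32)         // strip it: |result| lands in [1, 2)
}

fn main() {
    assert_eq!(getmant_1_2(12.0), 1.5);   // 12 = 1.5 * 2^3
    assert_eq!(getmant_1_2(-0.75), -1.5); // sign of the source is kept
}
```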
@@ -9793,7 +9793,7 @@ pub unsafe fn _mm512_getmant_round_ps<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_round_ps&expand=2887)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_round_ps&expand=2887)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))]
@@ -9807,8 +9807,8 @@ pub unsafe fn _mm512_mask_getmant_round_ps<
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let src = src.as_f32x16();
@@ -9828,7 +9828,7 @@ pub unsafe fn _mm512_mask_getmant_round_ps<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_round_ps&expand=2888)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_round_ps&expand=2888)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantps, NORM = 0, SIGN = 0, SAE = 4))]
@@ -9841,8 +9841,8 @@ pub unsafe fn _mm512_maskz_getmant_round_ps<
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let zero = _mm512_setzero_ps().as_f32x16();
@@ -9862,7 +9862,7 @@ pub unsafe fn _mm512_maskz_getmant_round_ps<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_round_pd&expand=2883)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_getmant_round_pd&expand=2883)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))]
@@ -9874,8 +9874,8 @@ pub unsafe fn _mm512_getmant_round_pd<
>(
a: __m512d,
) -> __m512d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
@@ -9895,7 +9895,7 @@ pub unsafe fn _mm512_getmant_round_pd<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_round_pd&expand=2884)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_getmant_round_pd&expand=2884)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))]
@@ -9909,8 +9909,8 @@ pub unsafe fn _mm512_mask_getmant_round_pd<
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let src = src.as_f64x8();
@@ -9930,7 +9930,7 @@ pub unsafe fn _mm512_mask_getmant_round_pd<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_512_maskz_getmant_round_pd&expand=2885)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_getmant_round_pd&expand=2885)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantpd, NORM = 0, SIGN = 0, SAE = 4))]
@@ -9943,8 +9943,8 @@ pub unsafe fn _mm512_maskz_getmant_round_pd<
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let zero = _mm512_setzero_pd().as_f64x8();
@@ -9954,7 +9954,7 @@ pub unsafe fn _mm512_maskz_getmant_round_pd<
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtps_epi32&expand=1737)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_epi32&expand=1737)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -9969,7 +9969,7 @@ pub unsafe fn _mm512_cvtps_epi32(a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtps_epi32&expand=1738)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_epi32&expand=1738)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -9984,7 +9984,7 @@ pub unsafe fn _mm512_mask_cvtps_epi32(src: __m512i, k: __mmask16, a: __m512) ->
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtps_epi32&expand=1739)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_epi32&expand=1739)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
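Unlike Rust's `as` cast, which truncates toward zero, vcvtps2dq rounds using the current MXCSR rounding mode, round-to-nearest-even by default. A scalar sketch of one lane under that default (out-of-range inputs, which the instruction handles specially, are ignored):

```rust
// Scalar model of one vcvtps2dq lane under the default MXCSR rounding
// mode (nearest, ties to even). Out-of-range handling is omitted.
fn cvtps_lane(x: f32) -> i32 {
    x.round_ties_even() as i32
}

fn main() {
    assert_eq!(cvtps_lane(1.5), 2);
    assert_eq!(cvtps_lane(2.5), 2); // ties go to the even integer
    assert_eq!(1.9f32 as i32, 1);   // whereas `as` truncates
}
```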
@@ -9999,7 +9999,7 @@ pub unsafe fn _mm512_maskz_cvtps_epi32(k: __mmask16, a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtps_epi32&expand=1735)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtps_epi32&expand=1735)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -10010,7 +10010,7 @@ pub unsafe fn _mm256_mask_cvtps_epi32(src: __m256i, k: __mmask8, a: __m256) -> _
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtps_epi32&expand=1736)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtps_epi32&expand=1736)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -10022,7 +10022,7 @@ pub unsafe fn _mm256_maskz_cvtps_epi32(k: __mmask8, a: __m256) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtps_epi32&expand=1732)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtps_epi32&expand=1732)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -10033,7 +10033,7 @@ pub unsafe fn _mm_mask_cvtps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m1
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtps_epi32&expand=1733)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtps_epi32&expand=1733)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2dq))]
@@ -10045,7 +10045,7 @@ pub unsafe fn _mm_maskz_cvtps_epi32(k: __mmask8, a: __m128) -> __m128i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtps_epu32&expand=1755)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_epu32&expand=1755)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10060,7 +10060,7 @@ pub unsafe fn _mm512_cvtps_epu32(a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtps_epu32&expand=1756)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_epu32&expand=1756)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10075,7 +10075,7 @@ pub unsafe fn _mm512_mask_cvtps_epu32(src: __m512i, k: __mmask16, a: __m512) ->
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundps_epu32&expand=1343)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_epu32&expand=1757)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10090,7 +10090,7 @@ pub unsafe fn _mm512_maskz_cvtps_epu32(k: __mmask16, a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtps_epu32&expand=1752)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtps_epu32&expand=1752)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10104,7 +10104,7 @@ pub unsafe fn _mm256_cvtps_epu32(a: __m256) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtps_epu32&expand=1753)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtps_epu32&expand=1753)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10114,7 +10114,7 @@ pub unsafe fn _mm256_mask_cvtps_epu32(src: __m256i, k: __mmask8, a: __m256) -> _
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtps_epu32&expand=1754)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtps_epu32&expand=1754)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10128,7 +10128,7 @@ pub unsafe fn _mm256_maskz_cvtps_epu32(k: __mmask8, a: __m256) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_epu32&expand=1749)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epu32&expand=1749)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10142,7 +10142,7 @@ pub unsafe fn _mm_cvtps_epu32(a: __m128) -> __m128i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtps_epu32&expand=1750)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtps_epu32&expand=1750)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10152,7 +10152,7 @@ pub unsafe fn _mm_mask_cvtps_epu32(src: __m128i, k: __mmask8, a: __m128) -> __m1
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtps_epu32&expand=1751)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtps_epu32&expand=1751)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2udq))]
@@ -10166,7 +10166,7 @@ pub unsafe fn _mm_maskz_cvtps_epu32(k: __mmask8, a: __m128) -> __m128i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed double-precision (64-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtps_pd&expand=1769)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_pd&expand=1769)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd))]
@@ -10181,7 +10181,7 @@ pub unsafe fn _mm512_cvtps_pd(a: __m256) -> __m512d {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtps_pd&expand=1770)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_pd&expand=1770)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd))]
@@ -10196,7 +10196,7 @@ pub unsafe fn _mm512_mask_cvtps_pd(src: __m512d, k: __mmask8, a: __m256) -> __m5
/// Convert packed single-precision (32-bit) floating-point elements in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtps_pd&expand=1771)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_pd&expand=1771)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd))]
@@ -10211,7 +10211,7 @@ pub unsafe fn _mm512_maskz_cvtps_pd(k: __mmask8, a: __m256) -> __m512d {
/// Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in v2 to packed double-precision (64-bit) floating-point elements, storing the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpslo_pd&expand=1784)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpslo_pd&expand=1784)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd))]
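The `pslo` variants read only the lower half of the 16-lane source: lanes 0..8 are widened to f64 and the upper half is ignored. A portable sketch of that shape:

```rust
// Model of _mm512_cvtpslo_pd's shape: widen the low eight f32 lanes
// of a 16-lane vector to f64; lanes 8..16 of the source are ignored.
fn cvtpslo(v2: [f32; 16]) -> [f64; 8] {
    let mut dst = [0.0f64; 8];
    for i in 0..8 {
        dst[i] = v2[i] as f64;
    }
    dst
}

fn main() {
    let mut v = [9.0f32; 16];
    v[0] = 1.5;
    assert_eq!(cvtpslo(v), [1.5, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0]);
}
```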
@@ -10226,7 +10226,7 @@ pub unsafe fn _mm512_cvtpslo_pd(v2: __m512) -> __m512d {
/// Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in v2 to packed double-precision (64-bit) floating-point elements, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpslo_pd&expand=1785)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpslo_pd&expand=1785)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd))]
@@ -10241,7 +10241,7 @@ pub unsafe fn _mm512_mask_cvtpslo_pd(src: __m512d, k: __mmask8, v2: __m512) -> _
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpd_ps&expand=1712)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_ps&expand=1712)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
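Going the other way, cvtpd_ps narrows each f64 lane to f32, so eight f64 lanes in a __m512d become eight f32 lanes in a __m256. A per-lane sketch using the ordinary float cast, which applies the same default nearest-even rounding:

```rust
// Per-lane model of vcvtpd2ps: narrow f64 -> f32, rounding to the
// nearest representable value; values beyond f32 range become infinity.
fn main() {
    let wide: [f64; 8] = [0.1, 1.0, 2.5, -3.75, 1e40, -1e40, 0.0, -0.0];
    let narrow: [f32; 8] = wide.map(|x| x as f32);
    assert_eq!(narrow[3], -3.75);         // exactly representable
    assert!(narrow[4].is_infinite());     // 1e40 overflows f32
    assert_ne!(narrow[0] as f64, 0.1f64); // 0.1 picks up narrowing error
}
```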
@@ -10256,7 +10256,7 @@ pub unsafe fn _mm512_cvtpd_ps(a: __m512d) -> __m256 {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpd_ps&expand=1713)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_ps&expand=1713)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10271,7 +10271,7 @@ pub unsafe fn _mm512_mask_cvtpd_ps(src: __m256, k: __mmask8, a: __m512d) -> __m2
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtpd_ps&expand=1714)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtpd_ps&expand=1714)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10286,7 +10286,7 @@ pub unsafe fn _mm512_maskz_cvtpd_ps(k: __mmask8, a: __m512d) -> __m256 {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtpd_ps&expand=1710)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtpd_ps&expand=1710)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10297,7 +10297,7 @@ pub unsafe fn _mm256_mask_cvtpd_ps(src: __m128, k: __mmask8, a: __m256d) -> __m1
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtpd_ps&expand=1711)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtpd_ps&expand=1711)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10309,7 +10309,7 @@ pub unsafe fn _mm256_maskz_cvtpd_ps(k: __mmask8, a: __m256d) -> __m128 {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtpd_ps&expand=1707)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtpd_ps&expand=1707)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10320,7 +10320,7 @@ pub unsafe fn _mm_mask_cvtpd_ps(src: __m128, k: __mmask8, a: __m128d) -> __m128
/// Convert packed double-precision (64-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtpd_ps&expand=1708)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtpd_ps&expand=1708)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10332,7 +10332,7 @@ pub unsafe fn _mm_maskz_cvtpd_ps(k: __mmask8, a: __m128d) -> __m128 {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpd_epi32&expand=1675)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_epi32&expand=1675)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10347,7 +10347,7 @@ pub unsafe fn _mm512_cvtpd_epi32(a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpd_epi32&expand=1676)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_epi32&expand=1676)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10362,7 +10362,7 @@ pub unsafe fn _mm512_mask_cvtpd_epi32(src: __m256i, k: __mmask8, a: __m512d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtpd_epi32&expand=1677)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtpd_epi32&expand=1677)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10377,7 +10377,7 @@ pub unsafe fn _mm512_maskz_cvtpd_epi32(k: __mmask8, a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtpd_epi32&expand=1673)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtpd_epi32&expand=1673)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10388,7 +10388,7 @@ pub unsafe fn _mm256_mask_cvtpd_epi32(src: __m128i, k: __mmask8, a: __m256d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtpd_epi32&expand=1674)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtpd_epi32&expand=1674)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10403,7 +10403,7 @@ pub unsafe fn _mm256_maskz_cvtpd_epi32(k: __mmask8, a: __m256d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtpd_epi32&expand=1670)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtpd_epi32&expand=1670)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10414,7 +10414,7 @@ pub unsafe fn _mm_mask_cvtpd_epi32(src: __m128i, k: __mmask8, a: __m128d) -> __m
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtpd_epi32&expand=1671)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtpd_epi32&expand=1671)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2dq))]
@@ -10429,7 +10429,7 @@ pub unsafe fn _mm_maskz_cvtpd_epi32(k: __mmask8, a: __m128d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpd_epu32&expand=1693)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_epu32&expand=1693)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10444,7 +10444,7 @@ pub unsafe fn _mm512_cvtpd_epu32(a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpd_epu32&expand=1694)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_epu32&expand=1694)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10459,7 +10459,7 @@ pub unsafe fn _mm512_mask_cvtpd_epu32(src: __m256i, k: __mmask8, a: __m512d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtpd_epu32&expand=1695)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtpd_epu32&expand=1695)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10474,7 +10474,7 @@ pub unsafe fn _mm512_maskz_cvtpd_epu32(k: __mmask8, a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_epu32&expand=1690)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtpd_epu32&expand=1690)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10488,7 +10488,7 @@ pub unsafe fn _mm256_cvtpd_epu32(a: __m256d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtpd_epu32&expand=1691)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtpd_epu32&expand=1691)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10498,7 +10498,7 @@ pub unsafe fn _mm256_mask_cvtpd_epu32(src: __m128i, k: __mmask8, a: __m256d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtpd_epu32&expand=1692)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtpd_epu32&expand=1692)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10512,7 +10512,7 @@ pub unsafe fn _mm256_maskz_cvtpd_epu32(k: __mmask8, a: __m256d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_epu32&expand=1687)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epu32&expand=1687)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10526,7 +10526,7 @@ pub unsafe fn _mm_cvtpd_epu32(a: __m128d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtpd_epu32&expand=1688)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtpd_epu32&expand=1688)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
@@ -10536,7 +10536,7 @@ pub unsafe fn _mm_mask_cvtpd_epu32(src: __m128i, k: __mmask8, a: __m128d) -> __m
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtpd_epu32&expand=1689)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtpd_epu32&expand=1689)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtpd2udq))]
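
A sketch of why the unsigned destination matters: values above i32::MAX still convert cleanly, under the same setup as the earlier sketch:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtpd_epu32() {
    // 3e9 overflows i32 but fits comfortably in u32 (and is exact in f64).
    let a = _mm512_set1_pd(3_000_000_000.0);
    let r = _mm512_cvtpd_epu32(a);
    assert_eq!(core::mem::transmute::<_, [u32; 8]>(r), [3_000_000_000; 8]);
}
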
@@ -10550,7 +10550,7 @@ pub unsafe fn _mm_maskz_cvtpd_epu32(k: __mmask8, a: __m128d) -> __m128i {
/// Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in v2 to single-precision (32-bit) floating-point elements and stores them in dst. The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpd_pslo&expand=1715)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtpd_pslo&expand=1715)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10561,7 +10561,7 @@ pub unsafe fn _mm512_cvtpd_pslo(v2: __m512d) -> __m512 {
0b11111111,
_MM_FROUND_CUR_DIRECTION,
);
- simd_shuffle16!(
+ simd_shuffle!(
r,
_mm256_setzero_ps().as_f32x8(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8],
@@ -10570,7 +10570,7 @@ pub unsafe fn _mm512_cvtpd_pslo(v2: __m512d) -> __m512 {
/// Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in v2 to single-precision (32-bit) floating-point elements and stores them in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpd_pslo&expand=1716)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtpd_pslo&expand=1716)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
@@ -10581,7 +10581,7 @@ pub unsafe fn _mm512_mask_cvtpd_pslo(src: __m512, k: __mmask8, v2: __m512d) -> _
k,
_MM_FROUND_CUR_DIRECTION,
);
- simd_shuffle16!(
+ simd_shuffle!(
r,
_mm256_setzero_ps().as_f32x8(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8],
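
A sketch of the lower-half packing the two pslo helpers perform, matching the shuffle above that selects the eight converted lanes and then lanes of a zero vector (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtpd_pslo() {
    let v2 = _mm512_set1_pd(1.5);
    let r = _mm512_cvtpd_pslo(v2);
    let lanes: [f32; 16] = core::mem::transmute(r);
    assert_eq!(&lanes[..8], &[1.5f32; 8]); // eight converted results...
    assert_eq!(&lanes[8..], &[0.0f32; 8]); // ...and a zeroed upper half
}
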
@@ -10590,7 +10590,7 @@ pub unsafe fn _mm512_mask_cvtpd_pslo(src: __m512, k: __mmask8, v2: __m512d) -> _
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi8_epi32&expand=1535)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi8_epi32&expand=1535)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
@@ -10601,7 +10601,7 @@ pub unsafe fn _mm512_cvtepi8_epi32(a: __m128i) -> __m512i {
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi8_epi32&expand=1536)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi8_epi32&expand=1536)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
@@ -10612,7 +10612,7 @@ pub unsafe fn _mm512_mask_cvtepi8_epi32(src: __m512i, k: __mmask16, a: __m128i)
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi8_epi32&expand=1537)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi8_epi32&expand=1537)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
@@ -10624,7 +10624,7 @@ pub unsafe fn _mm512_maskz_cvtepi8_epi32(k: __mmask16, a: __m128i) -> __m512i {
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi8_epi32&expand=1533)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi8_epi32&expand=1533)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
@@ -10635,7 +10635,7 @@ pub unsafe fn _mm256_mask_cvtepi8_epi32(src: __m256i, k: __mmask8, a: __m128i) -
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi8_epi32&expand=1534)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi8_epi32&expand=1534)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
@@ -10647,7 +10647,7 @@ pub unsafe fn _mm256_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m256i {
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi8_epi32&expand=1530)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi8_epi32&expand=1530)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
@@ -10658,7 +10658,7 @@ pub unsafe fn _mm_mask_cvtepi8_epi32(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi8_epi32&expand=1531)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi8_epi32&expand=1531)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbd))]
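
A sketch of the sign-extension semantics shared by this family (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_sign_extend() {
    let a = _mm_set1_epi8(-5);
    let r = _mm512_cvtepi8_epi32(a); // the sign bit propagates into the widened lanes
    assert_eq!(core::mem::transmute::<_, [i32; 16]>(r), [-5; 16]);
}
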
@@ -10670,19 +10670,19 @@ pub unsafe fn _mm_maskz_cvtepi8_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Sign extend packed 8-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi8_epi64&expand=1544)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi8_epi64&expand=1544)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
pub unsafe fn _mm512_cvtepi8_epi64(a: __m128i) -> __m512i {
let a = a.as_i8x16();
- let v64: i8x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let v64: i8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute::<i64x8, _>(simd_cast(v64))
}
/// Sign extend packed 8-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi8_epi64&expand=1545)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi8_epi64&expand=1545)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
@@ -10693,7 +10693,7 @@ pub unsafe fn _mm512_mask_cvtepi8_epi64(src: __m512i, k: __mmask8, a: __m128i) -
/// Sign extend packed 8-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi8_epi64&expand=1546)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi8_epi64&expand=1546)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
@@ -10705,7 +10705,7 @@ pub unsafe fn _mm512_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m512i {
/// Sign extend packed 8-bit integers in the low 4 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi8_epi64&expand=1542)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi8_epi64&expand=1542)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
@@ -10716,7 +10716,7 @@ pub unsafe fn _mm256_mask_cvtepi8_epi64(src: __m256i, k: __mmask8, a: __m128i) -
/// Sign extend packed 8-bit integers in the low 4 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi8_epi64&expand=1543)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi8_epi64&expand=1543)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
@@ -10728,7 +10728,7 @@ pub unsafe fn _mm256_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m256i {
/// Sign extend packed 8-bit integers in the low 2 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi8_epi64&expand=1539)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi8_epi64&expand=1539)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
@@ -10739,7 +10739,7 @@ pub unsafe fn _mm_mask_cvtepi8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Sign extend packed 8-bit integers in the low 2 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi8_epi64&expand=1540)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi8_epi64&expand=1540)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxbq))]
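
A sketch of the partial-source behavior: only the low half of the 16-byte source participates, which is exactly what the simd_shuffle! in the body above selects (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_epi8_epi64() {
    let a = _mm_set1_epi8(-2);
    // Only bytes 0..8 of `a` are consumed; eight i64 lanes come out.
    let r = _mm512_cvtepi8_epi64(a);
    assert_eq!(core::mem::transmute::<_, [i64; 8]>(r), [-2; 8]);
}
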
@@ -10751,7 +10751,7 @@ pub unsafe fn _mm_maskz_cvtepi8_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Zero extend packed unsigned 8-bit integers in a to packed 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu8_epi32&expand=1621)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu8_epi32&expand=1621)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxbd))]
@@ -10762,7 +10762,7 @@ pub unsafe fn _mm512_cvtepu8_epi32(a: __m128i) -> __m512i {
/// Zero extend packed unsigned 8-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu8_epi32&expand=1622)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu8_epi32&expand=1622)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxbd))]
@@ -10773,7 +10773,7 @@ pub unsafe fn _mm512_mask_cvtepu8_epi32(src: __m512i, k: __mmask16, a: __m128i)
/// Zero extend packed unsigned 8-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu8_epi32&expand=1623)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu8_epi32&expand=1623)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxbd))]
@@ -10785,7 +10785,7 @@ pub unsafe fn _mm512_maskz_cvtepu8_epi32(k: __mmask16, a: __m128i) -> __m512i {
/// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu8_epi32&expand=1619)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu8_epi32&expand=1619)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbd))]
@@ -10808,7 +10808,7 @@ pub unsafe fn _mm256_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m256i {
/// Zero extend packed unsigned 8-bit integers in the low 4 bytes of a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu8_epi32&expand=1616)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu8_epi32&expand=1616)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbd))]
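
A sketch contrasting zero extension with the signed family above (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_zero_extend() {
    let a = _mm_set1_epi8(-1);       // every byte is 0xFF
    let r = _mm512_cvtepu8_epi32(a); // zero-extended: 255 per lane, not -1
    assert_eq!(core::mem::transmute::<_, [i32; 16]>(r), [255; 16]);
}
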
@@ -10831,19 +10831,19 @@ pub unsafe fn _mm_maskz_cvtepu8_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu8_epi64&expand=1630)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu8_epi64&expand=1630)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
pub unsafe fn _mm512_cvtepu8_epi64(a: __m128i) -> __m512i {
let a = a.as_u8x16();
- let v64: u8x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let v64: u8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute::<i64x8, _>(simd_cast(v64))
}
/// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu8_epi64&expand=1631)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu8_epi64&expand=1631)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
@@ -10854,7 +10854,7 @@ pub unsafe fn _mm512_mask_cvtepu8_epi64(src: __m512i, k: __mmask8, a: __m128i) -
/// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu8_epi64&expand=1632)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu8_epi64&expand=1632)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
@@ -10866,7 +10866,7 @@ pub unsafe fn _mm512_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m512i {
/// Zero extend packed unsigned 8-bit integers in the low 4 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu8_epi64&expand=1628)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu8_epi64&expand=1628)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
@@ -10877,7 +10877,7 @@ pub unsafe fn _mm256_mask_cvtepu8_epi64(src: __m256i, k: __mmask8, a: __m128i) -
/// Zero extend packed unsigned 8-bit integers in the low 4 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu8_epi64&expand=1629)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu8_epi64&expand=1629)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
@@ -10889,7 +10889,7 @@ pub unsafe fn _mm256_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m256i {
/// Zero extend packed unsigned 8-bit integers in the low 2 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu8_epi64&expand=1625)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu8_epi64&expand=1625)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
@@ -10900,7 +10900,7 @@ pub unsafe fn _mm_mask_cvtepu8_epi64(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Zero extend packed unsigned 8-bit integers in the low 2 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu8_epi64&expand=1626)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu8_epi64&expand=1626)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxbq))]
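
A sketch of the narrowest variant in this family, where a 128-bit destination holds only two i64 lanes fed by the low 2 bytes of the source (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn demo_small_maskz() {
    let a = _mm_set1_epi8(9);
    // Mask 0b01 keeps lane 0 and zeroes lane 1.
    let r = _mm_maskz_cvtepu8_epi64(0b01, a);
    assert_eq!(core::mem::transmute::<_, [i64; 2]>(r), [9, 0]);
}
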
@@ -10912,7 +10912,7 @@ pub unsafe fn _mm_maskz_cvtepu8_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi16_epi32&expand=1389)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi16_epi32&expand=1389)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -10923,7 +10923,7 @@ pub unsafe fn _mm512_cvtepi16_epi32(a: __m256i) -> __m512i {
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi16_epi32&expand=1390)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_epi32&expand=1390)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -10934,7 +10934,7 @@ pub unsafe fn _mm512_mask_cvtepi16_epi32(src: __m512i, k: __mmask16, a: __m256i)
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi16_epi32&expand=1391)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi16_epi32&expand=1391)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -10946,7 +10946,7 @@ pub unsafe fn _mm512_maskz_cvtepi16_epi32(k: __mmask16, a: __m256i) -> __m512i {
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_epi32&expand=1387)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_epi32&expand=1387)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -10957,7 +10957,7 @@ pub unsafe fn _mm256_mask_cvtepi16_epi32(src: __m256i, k: __mmask8, a: __m128i)
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_epi32&expand=1388)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi16_epi32&expand=1388)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -10969,7 +10969,7 @@ pub unsafe fn _mm256_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m256i {
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_epi32&expand=1384)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_epi32&expand=1384)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
@@ -10980,7 +10980,7 @@ pub unsafe fn _mm_mask_cvtepi16_epi32(src: __m128i, k: __mmask8, a: __m128i) ->
/// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_epi32&expand=1385)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi16_epi32&expand=1385)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwd))]
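
A sketch showing why the 512-bit form of this family takes a __mmask16: there are sixteen result lanes, one mask bit each (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_mmask16() {
    let a = _mm256_set1_epi16(-3);
    let src = _mm512_set1_epi32(100);
    let r = _mm512_mask_cvtepi16_epi32(src, 0x00ff, a);
    let lanes: [i32; 16] = core::mem::transmute(r);
    assert_eq!(&lanes[..8], &[-3; 8]);  // converted lanes
    assert_eq!(&lanes[8..], &[100; 8]); // copied from src
}
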
@@ -10992,7 +10992,7 @@ pub unsafe fn _mm_maskz_cvtepi16_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi16_epi64&expand=1398)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi16_epi64&expand=1398)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
@@ -11003,7 +11003,7 @@ pub unsafe fn _mm512_cvtepi16_epi64(a: __m128i) -> __m512i {
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi16_epi64&expand=1399)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi16_epi64&expand=1399)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
@@ -11014,7 +11014,7 @@ pub unsafe fn _mm512_mask_cvtepi16_epi64(src: __m512i, k: __mmask8, a: __m128i)
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi16_epi64&expand=1400)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi16_epi64&expand=1400)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
@@ -11026,7 +11026,7 @@ pub unsafe fn _mm512_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m512i {
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_epi64&expand=1396)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi16_epi64&expand=1396)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
@@ -11037,7 +11037,7 @@ pub unsafe fn _mm256_mask_cvtepi16_epi64(src: __m256i, k: __mmask8, a: __m128i)
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_epi64&expand=1397)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi16_epi64&expand=1397)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
@@ -11049,7 +11049,7 @@ pub unsafe fn _mm256_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m256i {
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_epi64&expand=1393)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi16_epi64&expand=1393)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
@@ -11060,7 +11060,7 @@ pub unsafe fn _mm_mask_cvtepi16_epi64(src: __m128i, k: __mmask8, a: __m128i) ->
/// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_epi64&expand=1394)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi16_epi64&expand=1394)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxwq))]
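
A sketch of the 16-to-64-bit widening: each lane quadruples in width, so a 128-bit source fills a whole __m512i (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_epi16_epi64() {
    let a = _mm_set1_epi16(-7);
    let r = _mm512_cvtepi16_epi64(a);
    assert_eq!(core::mem::transmute::<_, [i64; 8]>(r), [-7; 8]);
}
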
@@ -11072,7 +11072,7 @@ pub unsafe fn _mm_maskz_cvtepi16_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu16_epi32&expand=1553)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu16_epi32&expand=1553)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -11083,7 +11083,7 @@ pub unsafe fn _mm512_cvtepu16_epi32(a: __m256i) -> __m512i {
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu16_epi32&expand=1554)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu16_epi32&expand=1554)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -11094,7 +11094,7 @@ pub unsafe fn _mm512_mask_cvtepu16_epi32(src: __m512i, k: __mmask16, a: __m256i)
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu16_epi32&expand=1555)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu16_epi32&expand=1555)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -11106,7 +11106,7 @@ pub unsafe fn _mm512_maskz_cvtepu16_epi32(k: __mmask16, a: __m256i) -> __m512i {
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu16_epi32&expand=1551)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu16_epi32&expand=1551)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -11117,7 +11117,7 @@ pub unsafe fn _mm256_mask_cvtepu16_epi32(src: __m256i, k: __mmask8, a: __m128i)
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu16_epi32&expand=1552)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu16_epi32&expand=1552)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -11129,7 +11129,7 @@ pub unsafe fn _mm256_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m256i {
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu16_epi32&expand=1548)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu16_epi32&expand=1548)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
@@ -11140,7 +11140,7 @@ pub unsafe fn _mm_mask_cvtepu16_epi32(src: __m128i, k: __mmask8, a: __m128i) ->
/// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu16_epi32&expand=1549)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu16_epi32&expand=1549)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwd))]
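
A sketch of the unsigned 16-bit case, where the all-ones pattern widens to 65535 rather than -1 (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_epu16_epi32() {
    let a = _mm256_set1_epi16(-1); // 0xFFFF per lane
    let r = _mm512_cvtepu16_epi32(a);
    assert_eq!(core::mem::transmute::<_, [i32; 16]>(r), [65535; 16]);
}
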
@@ -11152,7 +11152,7 @@ pub unsafe fn _mm_maskz_cvtepu16_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Zero extend packed unsigned 16-bit integers in a to packed 64-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu16_epi64&expand=1562)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu16_epi64&expand=1562)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
@@ -11163,7 +11163,7 @@ pub unsafe fn _mm512_cvtepu16_epi64(a: __m128i) -> __m512i {
/// Zero extend packed unsigned 16-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu16_epi64&expand=1563)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu16_epi64&expand=1563)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
@@ -11174,7 +11174,7 @@ pub unsafe fn _mm512_mask_cvtepu16_epi64(src: __m512i, k: __mmask8, a: __m128i)
/// Zero extend packed unsigned 16-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu16_epi64&expand=1564)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu16_epi64&expand=1564)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
@@ -11186,7 +11186,7 @@ pub unsafe fn _mm512_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m512i {
/// Zero extend packed unsigned 16-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu16_epi64&expand=1560)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu16_epi64&expand=1560)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
@@ -11197,7 +11197,7 @@ pub unsafe fn _mm256_mask_cvtepu16_epi64(src: __m256i, k: __mmask8, a: __m128i)
/// Zero extend packed unsigned 16-bit integers in the low 8 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu16_epi64&expand=1561)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu16_epi64&expand=1561)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
@@ -11209,7 +11209,7 @@ pub unsafe fn _mm256_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m256i {
/// Zero extend packed unsigned 16-bit integers in the low 4 bytes of a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu16_epi64&expand=1557)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu16_epi64&expand=1557)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
@@ -11220,7 +11220,7 @@ pub unsafe fn _mm_mask_cvtepu16_epi64(src: __m128i, k: __mmask8, a: __m128i) ->
/// Zero extend packed unsigned 16-bit integers in the low 4 bytes of a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu16_epi64&expand=1558)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu16_epi64&expand=1558)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxwq))]
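
A sketch of the 256-bit variant, whose four i64 lanes are fed by the low 8 bytes of the 128-bit source (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn demo_epu16_epi64() {
    let a = _mm_set1_epi16(1000);
    let r = _mm256_maskz_cvtepu16_epi64(0b1111, a);
    assert_eq!(core::mem::transmute::<_, [i64; 4]>(r), [1000; 4]);
}
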
@@ -11232,7 +11232,7 @@ pub unsafe fn _mm_maskz_cvtepu16_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_epi64&expand=1428)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_epi64&expand=1428)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -11243,7 +11243,7 @@ pub unsafe fn _mm512_cvtepi32_epi64(a: __m256i) -> __m512i {
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_epi64&expand=1429)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_epi64&expand=1429)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -11254,7 +11254,7 @@ pub unsafe fn _mm512_mask_cvtepi32_epi64(src: __m512i, k: __mmask8, a: __m256i)
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_epi64&expand=1430)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_epi64&expand=1430)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -11266,7 +11266,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_epi64(k: __mmask8, a: __m256i) -> __m512i {
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_epi64&expand=1426)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_epi64&expand=1426)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -11277,7 +11277,7 @@ pub unsafe fn _mm256_mask_cvtepi32_epi64(src: __m256i, k: __mmask8, a: __m128i)
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_epi64&expand=1427)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_epi64&expand=1427)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -11289,7 +11289,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m256i {
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_epi64&expand=1423)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_epi64&expand=1423)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
@@ -11300,7 +11300,7 @@ pub unsafe fn _mm_mask_cvtepi32_epi64(src: __m128i, k: __mmask8, a: __m128i) ->
/// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_epi64&expand=1424)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_epi64&expand=1424)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsxdq))]
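
A sketch of the signed 32-to-64-bit widening, which preserves even the most negative i32 (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_epi32_epi64() {
    let a = _mm256_set1_epi32(i32::MIN);
    let r = _mm512_cvtepi32_epi64(a); // sign-extends, so i32::MIN survives
    assert_eq!(core::mem::transmute::<_, [i64; 8]>(r), [i32::MIN as i64; 8]);
}
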
@@ -11312,7 +11312,7 @@ pub unsafe fn _mm_maskz_cvtepi32_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu32_epi64&expand=1571)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32_epi64&expand=1571)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -11323,7 +11323,7 @@ pub unsafe fn _mm512_cvtepu32_epi64(a: __m256i) -> __m512i {
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu32_epi64&expand=1572)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32_epi64&expand=1572)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -11334,7 +11334,7 @@ pub unsafe fn _mm512_mask_cvtepu32_epi64(src: __m512i, k: __mmask8, a: __m256i)
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu32_epi64&expand=1573)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu32_epi64&expand=1573)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -11346,7 +11346,7 @@ pub unsafe fn _mm512_maskz_cvtepu32_epi64(k: __mmask8, a: __m256i) -> __m512i {
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu32_epi64&expand=1569)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu32_epi64&expand=1569)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -11357,7 +11357,7 @@ pub unsafe fn _mm256_mask_cvtepu32_epi64(src: __m256i, k: __mmask8, a: __m128i)
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu32_epi64&expand=1570)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu32_epi64&expand=1570)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -11369,7 +11369,7 @@ pub unsafe fn _mm256_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m256i {
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu32_epi64&expand=1566)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu32_epi64&expand=1566)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
@@ -11380,7 +11380,7 @@ pub unsafe fn _mm_mask_cvtepu32_epi64(src: __m128i, k: __mmask8, a: __m128i) ->
/// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu32_epi64&expand=1567)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu32_epi64&expand=1567)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovzxdq))]
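
A sketch of the unsigned counterpart, where bit patterns at or above 2^31 stay positive after widening (same setup assumptions; name illustrative):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo_epu32_epi64() {
    let a = _mm256_set1_epi32(-1);    // 0xFFFF_FFFF per lane
    let r = _mm512_cvtepu32_epi64(a); // zero-extends: u32::MAX, not -1
    assert_eq!(core::mem::transmute::<_, [i64; 8]>(r), [u32::MAX as i64; 8]);
}
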
@@ -11392,7 +11392,7 @@ pub unsafe fn _mm_maskz_cvtepu32_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_ps&expand=1455)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_ps&expand=1455)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11403,7 +11403,7 @@ pub unsafe fn _mm512_cvtepi32_ps(a: __m512i) -> __m512 {
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_ps&expand=1456)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_ps&expand=1456)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11414,7 +11414,7 @@ pub unsafe fn _mm512_mask_cvtepi32_ps(src: __m512, k: __mmask16, a: __m512i) ->
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_ps&expand=1457)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_ps&expand=1457)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11426,7 +11426,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_ps(k: __mmask16, a: __m512i) -> __m512 {
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_ps&expand=1453)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_ps&expand=1453)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11437,7 +11437,7 @@ pub unsafe fn _mm256_mask_cvtepi32_ps(src: __m256, k: __mmask8, a: __m256i) -> _
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ps&expand=1454)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_ps&expand=1454)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11449,7 +11449,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_ps(k: __mmask8, a: __m256i) -> __m256 {
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_ps&expand=1450)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_ps&expand=1450)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11460,7 +11460,7 @@ pub unsafe fn _mm_mask_cvtepi32_ps(src: __m128, k: __mmask8, a: __m128i) -> __m1
/// Convert packed signed 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ps&expand=1451)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_ps&expand=1451)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
@@ -11472,7 +11472,7 @@ pub unsafe fn _mm_maskz_cvtepi32_ps(k: __mmask8, a: __m128i) -> __m128 {
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_pd&expand=1446)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_pd&expand=1446)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11483,7 +11483,7 @@ pub unsafe fn _mm512_cvtepi32_pd(a: __m256i) -> __m512d {
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_pd&expand=1447)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_pd&expand=1447)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11494,7 +11494,7 @@ pub unsafe fn _mm512_mask_cvtepi32_pd(src: __m512d, k: __mmask8, a: __m256i) ->
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_pd&expand=1448)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_pd&expand=1448)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11506,7 +11506,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_pd(k: __mmask8, a: __m256i) -> __m512d {
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_pd&expand=1444)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_pd&expand=1444)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11517,7 +11517,7 @@ pub unsafe fn _mm256_mask_cvtepi32_pd(src: __m256d, k: __mmask8, a: __m128i) ->
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_pd&expand=1445)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_pd&expand=1445)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11529,7 +11529,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m256d {
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_pd&expand=1441)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_pd&expand=1441)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11540,7 +11540,7 @@ pub unsafe fn _mm_mask_cvtepi32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m
/// Convert packed signed 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_pd&expand=1442)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_pd&expand=1442)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
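// Editorial sketch, not part of this patch: i32 -> f64 doubles the lane
// width, so a 256-bit integer input fills a 512-bit double vector.
// Hypothetical helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepi32_pd() -> __m512d {
    let a = _mm256_set1_epi32(-7); // eight i32 lanes
    _mm512_cvtepi32_pd(a) // eight f64 lanes, each -7.0
}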
@@ -11552,7 +11552,7 @@ pub unsafe fn _mm_maskz_cvtepi32_pd(k: __mmask8, a: __m128i) -> __m128d {
/// Convert packed unsigned 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu32_ps&expand=1583)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32_ps&expand=1583)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2ps))]
@@ -11563,7 +11563,7 @@ pub unsafe fn _mm512_cvtepu32_ps(a: __m512i) -> __m512 {
/// Convert packed unsigned 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu32_ps&expand=1584)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32_ps&expand=1584)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2ps))]
@@ -11574,7 +11574,7 @@ pub unsafe fn _mm512_mask_cvtepu32_ps(src: __m512, k: __mmask16, a: __m512i) ->
/// Convert packed unsigned 32-bit integers in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu32_ps&expand=1585)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu32_ps&expand=1585)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2ps))]
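// Editorial sketch, not part of this patch: the epu32 conversions interpret
// each lane's bits as unsigned, unlike _mm512_cvtepi32_ps. Hypothetical helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepu32_ps() -> __m512 {
    let a = _mm512_set1_epi32(i32::MIN); // bit pattern 0x8000_0000 per lane
    _mm512_cvtepu32_ps(a) // each lane 2147483648.0 (2^31), not -2147483648.0
}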
@@ -11586,7 +11586,7 @@ pub unsafe fn _mm512_maskz_cvtepu32_ps(k: __mmask16, a: __m512i) -> __m512 {
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu32_pd&expand=1580)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32_pd&expand=1580)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11597,7 +11597,7 @@ pub unsafe fn _mm512_cvtepu32_pd(a: __m256i) -> __m512d {
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu32_pd&expand=1581)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32_pd&expand=1581)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11608,7 +11608,7 @@ pub unsafe fn _mm512_mask_cvtepu32_pd(src: __m512d, k: __mmask8, a: __m256i) ->
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu32_pd&expand=1582)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepu32_pd&expand=1582)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11620,7 +11620,7 @@ pub unsafe fn _mm512_maskz_cvtepu32_pd(k: __mmask8, a: __m256i) -> __m512d {
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu32_pd&expand=1577)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepu32_pd&expand=1577)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11631,7 +11631,7 @@ pub unsafe fn _mm256_cvtepu32_pd(a: __m128i) -> __m256d {
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu32_pd&expand=1578)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepu32_pd&expand=1578)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11642,7 +11642,7 @@ pub unsafe fn _mm256_mask_cvtepu32_pd(src: __m256d, k: __mmask8, a: __m128i) ->
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu32_pd&expand=1579)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepu32_pd&expand=1579)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11654,19 +11654,19 @@ pub unsafe fn _mm256_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m256d {
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu32_pd&expand=1574)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu32_pd&expand=1574)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
pub unsafe fn _mm_cvtepu32_pd(a: __m128i) -> __m128d {
let a = a.as_u32x4();
- let u64: u32x2 = simd_shuffle2!(a, a, [0, 1]);
+ let u64: u32x2 = simd_shuffle!(a, a, [0, 1]);
transmute::<f64x2, _>(simd_cast(u64))
}
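// Editorial sketch, not part of this patch: only the two low u32 lanes take
// part (the simd_shuffle! above extracts them), and the maskz variant zeroes
// deselected result lanes. Hypothetical helper; needs avx512f+avx512vl.
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn demo_cvtepu32_pd() -> (__m128d, __m128d) {
    let a = _mm_set_epi32(9, 9, 2, i32::MIN); // lanes 3..0; only lanes 1 and 0 matter
    let full = _mm_cvtepu32_pd(a); // lane 0 = 2147483648.0 (2^31 unsigned), lane 1 = 2.0
    let zeroed = _mm_maskz_cvtepu32_pd(0b01, a); // keep lane 0 only: lane 1 becomes 0.0
    (full, zeroed)
}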
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu32_pd&expand=1575)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepu32_pd&expand=1575)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11677,7 +11677,7 @@ pub unsafe fn _mm_mask_cvtepu32_pd(src: __m128d, k: __mmask8, a: __m128i) -> __m
/// Convert packed unsigned 32-bit integers in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu32_pd&expand=1576)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepu32_pd&expand=1576)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11689,19 +11689,19 @@ pub unsafe fn _mm_maskz_cvtepu32_pd(k: __mmask8, a: __m128i) -> __m128d {
/// Performs element-by-element conversion of the lower half of packed 32-bit integer elements in v2 to packed double-precision (64-bit) floating-point elements, storing the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32lo_pd&expand=1464)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32lo_pd&expand=1464)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
pub unsafe fn _mm512_cvtepi32lo_pd(v2: __m512i) -> __m512d {
let v2 = v2.as_i32x16();
- let v256: i32x8 = simd_shuffle8!(v2, v2, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let v256: i32x8 = simd_shuffle!(v2, v2, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute::<f64x8, _>(simd_cast(v256))
}
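// Editorial sketch, not part of this patch: only the low half (8 of the 16
// i32 lanes) of v2 is converted, matching the simd_shuffle! above.
// Hypothetical helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepi32lo_pd() -> __m512d {
    let v2 = _mm512_set1_epi32(-5);
    _mm512_cvtepi32lo_pd(v2) // eight f64 lanes, each -5.0; the high eight i32 lanes are ignored
}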
/// Performs element-by-element conversion of the lower half of packed 32-bit integer elements in v2 to packed double-precision (64-bit) floating-point elements, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32lo_pd&expand=1465)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32lo_pd&expand=1465)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
@@ -11712,19 +11712,19 @@ pub unsafe fn _mm512_mask_cvtepi32lo_pd(src: __m512d, k: __mmask8, v2: __m512i)
/// Performs element-by-element conversion of the lower half of packed 32-bit unsigned integer elements in v2 to packed double-precision (64-bit) floating-point elements, storing the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu32lo_pd&expand=1586)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepu32lo_pd&expand=1586)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
pub unsafe fn _mm512_cvtepu32lo_pd(v2: __m512i) -> __m512d {
let v2 = v2.as_u32x16();
- let v256: u32x8 = simd_shuffle8!(v2, v2, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let v256: u32x8 = simd_shuffle!(v2, v2, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute::<f64x8, _>(simd_cast(v256))
}
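// Editorial sketch, not part of this patch: the same low-half selection as
// _mm512_cvtepi32lo_pd, but with unsigned lane interpretation. Hypothetical
// helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepu32lo_pd() -> __m512d {
    let v2 = _mm512_set1_epi32(-1); // 0xFFFF_FFFF per lane
    _mm512_cvtepu32lo_pd(v2) // eight f64 lanes, each 4294967295.0
}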
/// Performs element-by-element conversion of the lower half of packed 32-bit unsigned integer elements in v2 to packed double-precision (64-bit) floating-point elements, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu32lo_pd&expand=1587)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepu32lo_pd&expand=1587)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2pd))]
@@ -11735,7 +11735,7 @@ pub unsafe fn _mm512_mask_cvtepu32lo_pd(src: __m512d, k: __mmask8, v2: __m512i)
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_epi16&expand=1419)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_epi16&expand=1419)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11746,7 +11746,7 @@ pub unsafe fn _mm512_cvtepi32_epi16(a: __m512i) -> __m256i {
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_epi16&expand=1420)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_epi16&expand=1420)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11757,7 +11757,7 @@ pub unsafe fn _mm512_mask_cvtepi32_epi16(src: __m256i, k: __mmask16, a: __m512i)
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_epi16&expand=1421)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_epi16&expand=1421)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11769,7 +11769,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_epi16(k: __mmask16, a: __m512i) -> __m256i {
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_epi16&expand=1416)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_epi16&expand=1416)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11780,7 +11780,7 @@ pub unsafe fn _mm256_cvtepi32_epi16(a: __m256i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_epi16&expand=1417)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_epi16&expand=1417)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11791,7 +11791,7 @@ pub unsafe fn _mm256_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_epi16&expand=1418)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_epi16&expand=1418)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11803,7 +11803,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_epi16(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_epi16&expand=1413)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi16&expand=1413)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11817,7 +11817,7 @@ pub unsafe fn _mm_cvtepi32_epi16(a: __m128i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_epi16&expand=1414)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_epi16&expand=1414)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -11827,7 +11827,7 @@ pub unsafe fn _mm_mask_cvtepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_epi16&expand=1415)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_epi16&expand=1415)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
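// Editorial sketch, not part of this patch: "with truncation" means the high
// bits of each lane are simply dropped (vpmovdw), not saturated. Hypothetical
// helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepi32_epi16() -> __m256i {
    let a = _mm512_set1_epi32(0x0001_2345);
    _mm512_cvtepi32_epi16(a) // each i16 lane is 0x2345; the 0x0001 high half is lost
}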
@@ -11837,7 +11837,7 @@ pub unsafe fn _mm_maskz_cvtepi32_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_epi8&expand=1437)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi32_epi8&expand=1437)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11848,7 +11848,7 @@ pub unsafe fn _mm512_cvtepi32_epi8(a: __m512i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_epi8&expand=1438)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_epi8&expand=1438)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11859,7 +11859,7 @@ pub unsafe fn _mm512_mask_cvtepi32_epi8(src: __m128i, k: __mmask16, a: __m512i)
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_epi8&expand=1439)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi32_epi8&expand=1439)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11871,7 +11871,7 @@ pub unsafe fn _mm512_maskz_cvtepi32_epi8(k: __mmask16, a: __m512i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_epi8&expand=1434)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi32_epi8&expand=1434)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11885,7 +11885,7 @@ pub unsafe fn _mm256_cvtepi32_epi8(a: __m256i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_epi8&expand=1435)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_epi8&expand=1435)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11895,7 +11895,7 @@ pub unsafe fn _mm256_mask_cvtepi32_epi8(src: __m128i, k: __mmask8, a: __m256i) -
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_epi8&expand=1436)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi32_epi8&expand=1436)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11905,7 +11905,7 @@ pub unsafe fn _mm256_maskz_cvtepi32_epi8(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_epi8&expand=1431)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi8&expand=1431)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11919,7 +11919,7 @@ pub unsafe fn _mm_cvtepi32_epi8(a: __m128i) -> __m128i {
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_epi8&expand=1432)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_epi8&expand=1432)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -11929,7 +11929,7 @@ pub unsafe fn _mm_mask_cvtepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_epi8&expand=1433)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi32_epi8&expand=1433)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
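// Editorial sketch, not part of this patch: the i32 -> i8 truncation keeps
// only the low byte of each lane. Hypothetical helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepi32_epi8() -> __m128i {
    let a = _mm512_set1_epi32(0x1234_5678);
    _mm512_cvtepi32_epi8(a) // sixteen i8 lanes, each 0x78
}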
@@ -11939,7 +11939,7 @@ pub unsafe fn _mm_maskz_cvtepi32_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi64_epi32&expand=1481)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi64_epi32&expand=1481)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -11950,7 +11950,7 @@ pub unsafe fn _mm512_cvtepi64_epi32(a: __m512i) -> __m256i {
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_epi32&expand=1482)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_epi32&expand=1482)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -11961,7 +11961,7 @@ pub unsafe fn _mm512_mask_cvtepi64_epi32(src: __m256i, k: __mmask8, a: __m512i)
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi64_epi32&expand=1483)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi64_epi32&expand=1483)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -11973,7 +11973,7 @@ pub unsafe fn _mm512_maskz_cvtepi64_epi32(k: __mmask8, a: __m512i) -> __m256i {
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_epi32&expand=1478)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi64_epi32&expand=1478)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -11984,7 +11984,7 @@ pub unsafe fn _mm256_cvtepi64_epi32(a: __m256i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_epi32&expand=1479)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_epi32&expand=1479)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -11995,7 +11995,7 @@ pub unsafe fn _mm256_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_epi32&expand=1480)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi64_epi32&expand=1480)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -12007,7 +12007,7 @@ pub unsafe fn _mm256_maskz_cvtepi64_epi32(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_epi32&expand=1475)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi64_epi32&expand=1475)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -12021,7 +12021,7 @@ pub unsafe fn _mm_cvtepi64_epi32(a: __m128i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_epi32&expand=1476)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_epi32&expand=1476)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -12031,7 +12031,7 @@ pub unsafe fn _mm_mask_cvtepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_epi32&expand=1477)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi64_epi32&expand=1477)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
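// Editorial sketch, not part of this patch: i64 -> i32 truncation drops the
// upper 32 bits of each lane. Hypothetical helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtepi64_epi32() -> __m256i {
    let a = _mm512_set1_epi64(0x1_0000_0002); // 2^32 + 2 per lane
    _mm512_cvtepi64_epi32(a) // eight i32 lanes, each 2; the 2^32 bit is lost
}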
@@ -12041,7 +12041,7 @@ pub unsafe fn _mm_maskz_cvtepi64_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi64_epi16&expand=1472)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi64_epi16&expand=1472)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12052,7 +12052,7 @@ pub unsafe fn _mm512_cvtepi64_epi16(a: __m512i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_epi16&expand=1473)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_epi16&expand=1473)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12063,7 +12063,7 @@ pub unsafe fn _mm512_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m512i)
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi64_epi16&expand=1474)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi64_epi16&expand=1474)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12075,7 +12075,7 @@ pub unsafe fn _mm512_maskz_cvtepi64_epi16(k: __mmask8, a: __m512i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_epi16&expand=1469)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi64_epi16&expand=1469)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12089,7 +12089,7 @@ pub unsafe fn _mm256_cvtepi64_epi16(a: __m256i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_epi16&expand=1470)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_epi16&expand=1470)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12099,7 +12099,7 @@ pub unsafe fn _mm256_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_epi16&expand=1471)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi64_epi16&expand=1471)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12109,7 +12109,7 @@ pub unsafe fn _mm256_maskz_cvtepi64_epi16(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_epi16&expand=1466)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi64_epi16&expand=1466)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12123,7 +12123,7 @@ pub unsafe fn _mm_cvtepi64_epi16(a: __m128i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_epi16&expand=1467)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_epi16&expand=1467)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12133,7 +12133,7 @@ pub unsafe fn _mm_mask_cvtepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_epi16&expand=1468)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi64_epi16&expand=1468)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -12143,7 +12143,7 @@ pub unsafe fn _mm_maskz_cvtepi64_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi64_epi8&expand=1490)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtepi64_epi8&expand=1490)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12157,7 +12157,7 @@ pub unsafe fn _mm512_cvtepi64_epi8(a: __m512i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_epi8&expand=1491)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_epi8&expand=1491)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12167,7 +12167,7 @@ pub unsafe fn _mm512_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m512i) -
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi64_epi8&expand=1492)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtepi64_epi8&expand=1492)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12177,7 +12177,7 @@ pub unsafe fn _mm512_maskz_cvtepi64_epi8(k: __mmask8, a: __m512i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_epi8&expand=1487)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtepi64_epi8&expand=1487)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12191,7 +12191,7 @@ pub unsafe fn _mm256_cvtepi64_epi8(a: __m256i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_epi8&expand=1488)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_epi8&expand=1488)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12201,7 +12201,7 @@ pub unsafe fn _mm256_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m256i) -
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_epi8&expand=1489)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtepi64_epi8&expand=1489)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12211,7 +12211,7 @@ pub unsafe fn _mm256_maskz_cvtepi64_epi8(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_epi8&expand=1484)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi64_epi8&expand=1484)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12225,7 +12225,7 @@ pub unsafe fn _mm_cvtepi64_epi8(a: __m128i) -> __m128i {
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_epi8&expand=1485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_epi8&expand=1485)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12235,7 +12235,7 @@ pub unsafe fn _mm_mask_cvtepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) -> _
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_epi8&expand=1486)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtepi64_epi8&expand=1486)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -12245,7 +12245,7 @@ pub unsafe fn _mm_maskz_cvtepi64_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi32_epi16&expand=1819)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi32_epi16&expand=1819)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12259,7 +12259,7 @@ pub unsafe fn _mm512_cvtsepi32_epi16(a: __m512i) -> __m256i {
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi32_epi16&expand=1820)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_epi16&expand=1820)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12269,7 +12269,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_epi16(src: __m256i, k: __mmask16, a: __m512i
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi32_epi16&expand=1819)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi32_epi16&expand=1821)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12283,7 +12283,7 @@ pub unsafe fn _mm512_maskz_cvtsepi32_epi16(k: __mmask16, a: __m512i) -> __m256i
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsepi32_epi16&expand=1816)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi32_epi16&expand=1816)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12297,7 +12297,7 @@ pub unsafe fn _mm256_cvtsepi32_epi16(a: __m256i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi32_epi16&expand=1817)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_epi16&expand=1817)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12307,7 +12307,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_epi16(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtsepi32_epi16&expand=1818)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi32_epi16&expand=1818)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12317,7 +12317,7 @@ pub unsafe fn _mm256_maskz_cvtsepi32_epi16(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsepi32_epi16&expand=1813)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi32_epi16&expand=1813)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12331,7 +12331,7 @@ pub unsafe fn _mm_cvtsepi32_epi16(a: __m128i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi32_epi16&expand=1814)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_epi16&expand=1814)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -12341,7 +12341,7 @@ pub unsafe fn _mm_mask_cvtsepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsepi32_epi16&expand=1815)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi32_epi16&expand=1815)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
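// Editorial sketch, not part of this patch: "signed saturation" clamps each
// lane to the i16 range instead of dropping bits, unlike
// _mm512_cvtepi32_epi16. Hypothetical helper.
#[target_feature(enable = "avx512f")]
unsafe fn demo_cvtsepi32_epi16() -> __m256i {
    let a = _mm512_set1_epi32(100_000); // above i16::MAX
    _mm512_cvtsepi32_epi16(a) // each i16 lane saturates to 32767
}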
@@ -12351,7 +12351,7 @@ pub unsafe fn _mm_maskz_cvtsepi32_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi32_epi8&expand=1828)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi32_epi8&expand=1828)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12365,7 +12365,7 @@ pub unsafe fn _mm512_cvtsepi32_epi8(a: __m512i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi32_epi8&expand=1829)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_epi8&expand=1829)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12375,7 +12375,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_epi8(src: __m128i, k: __mmask16, a: __m512i)
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtsepi32_epi8&expand=1830)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi32_epi8&expand=1830)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12385,7 +12385,7 @@ pub unsafe fn _mm512_maskz_cvtsepi32_epi8(k: __mmask16, a: __m512i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsepi32_epi8&expand=1825)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi32_epi8&expand=1825)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12399,7 +12399,7 @@ pub unsafe fn _mm256_cvtsepi32_epi8(a: __m256i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi32_epi8&expand=1826)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_epi8&expand=1826)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12409,7 +12409,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_epi8(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtsepi32_epi8&expand=1827)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi32_epi8&expand=1827)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12419,7 +12419,7 @@ pub unsafe fn _mm256_maskz_cvtsepi32_epi8(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsepi32_epi8&expand=1822)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi32_epi8&expand=1822)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12433,7 +12433,7 @@ pub unsafe fn _mm_cvtsepi32_epi8(a: __m128i) -> __m128i {
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi32_epi8&expand=1823)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_epi8&expand=1823)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12443,7 +12443,7 @@ pub unsafe fn _mm_mask_cvtsepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsepi32_epi8&expand=1824)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi32_epi8&expand=1824)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -12453,7 +12453,7 @@ pub unsafe fn _mm_maskz_cvtsepi32_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi64_epi32&expand=1852)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi64_epi32&expand=1852)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12467,7 +12467,7 @@ pub unsafe fn _mm512_cvtsepi64_epi32(a: __m512i) -> __m256i {
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi64_epi32&expand=1853)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_epi32&expand=1853)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12477,7 +12477,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_epi32(src: __m256i, k: __mmask8, a: __m512i)
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtsepi64_epi32&expand=1854)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi64_epi32&expand=1854)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12487,7 +12487,7 @@ pub unsafe fn _mm512_maskz_cvtsepi64_epi32(k: __mmask8, a: __m512i) -> __m256i {
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsepi64_epi32&expand=1849)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi64_epi32&expand=1849)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12501,7 +12501,7 @@ pub unsafe fn _mm256_cvtsepi64_epi32(a: __m256i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi64_epi32&expand=1850)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_epi32&expand=1850)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12511,7 +12511,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_epi32(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtsepi64_epi32&expand=1851)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi64_epi32&expand=1851)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12521,7 +12521,7 @@ pub unsafe fn _mm256_maskz_cvtsepi64_epi32(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsepi64_epi32&expand=1846)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi64_epi32&expand=1846)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12535,7 +12535,7 @@ pub unsafe fn _mm_cvtsepi64_epi32(a: __m128i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi64_epi32&expand=1847)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_epi32&expand=1847)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12545,7 +12545,7 @@ pub unsafe fn _mm_mask_cvtsepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsepi64_epi32&expand=1848)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi64_epi32&expand=1848)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -12555,7 +12555,7 @@ pub unsafe fn _mm_maskz_cvtsepi64_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi64_epi16&expand=1843)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi64_epi16&expand=1843)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
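
All of the cvtsepi* intrinsics in this file saturate rather than truncate on narrowing. A minimal sketch of the clamping behaviour for the 64-to-16-bit case, under the same toolchain and CPU assumptions as above (`saturate_demo` is an illustrative name):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn saturate_demo() -> __m128i {
    // Arguments run from the highest lane (e7) down to lane 0.
    let a = _mm512_set_epi64(i64::MAX, i64::MIN, 40_000, -40_000, 123, -123, 0, 1);
    // Out-of-range inputs clamp to i16::MAX / i16::MIN, so the result
    // lanes (high to low) are: 32767, -32768, 32767, -32768, 123, -123, 0, 1.
    _mm512_cvtsepi64_epi16(a)
}
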
@@ -12569,7 +12569,7 @@ pub unsafe fn _mm512_cvtsepi64_epi16(a: __m512i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi64_epi16&expand=1844)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_epi16&expand=1844)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12579,7 +12579,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m512i)
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtsepi64_epi16&expand=1845)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi64_epi16&expand=1845)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12589,7 +12589,7 @@ pub unsafe fn _mm512_maskz_cvtsepi64_epi16(k: __mmask8, a: __m512i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsepi64_epi16&expand=1840)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi64_epi16&expand=1840)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12603,7 +12603,7 @@ pub unsafe fn _mm256_cvtsepi64_epi16(a: __m256i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi64_epi16&expand=1841)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_epi16&expand=1841)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12613,7 +12613,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtsepi64_epi16&expand=1842)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi64_epi16&expand=1842)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12623,7 +12623,7 @@ pub unsafe fn _mm256_maskz_cvtsepi64_epi16(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsepi64_epi16&expand=1837)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi64_epi16&expand=1837)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12637,7 +12637,7 @@ pub unsafe fn _mm_cvtsepi64_epi16(a: __m128i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi64_epi16&expand=1838)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_epi16&expand=1838)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12647,7 +12647,7 @@ pub unsafe fn _mm_mask_cvtsepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsepi64_epi16&expand=1839)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi64_epi16&expand=1839)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -12657,7 +12657,7 @@ pub unsafe fn _mm_maskz_cvtsepi64_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsepi64_epi8&expand=1861)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsepi64_epi8&expand=1861)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12671,7 +12671,7 @@ pub unsafe fn _mm512_cvtsepi64_epi8(a: __m512i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi64_epi8&expand=1862)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_epi8&expand=1862)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12681,7 +12681,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m512i)
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtsepi64_epi8&expand=1863)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtsepi64_epi8&expand=1863)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12691,7 +12691,7 @@ pub unsafe fn _mm512_maskz_cvtsepi64_epi8(k: __mmask8, a: __m512i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsepi64_epi8&expand=1858)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtsepi64_epi8&expand=1858)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12705,7 +12705,7 @@ pub unsafe fn _mm256_cvtsepi64_epi8(a: __m256i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi64_epi8&expand=1859)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_epi8&expand=1859)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12715,7 +12715,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtsepi64_epi8&expand=1860)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtsepi64_epi8&expand=1860)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12725,7 +12725,7 @@ pub unsafe fn _mm256_maskz_cvtsepi64_epi8(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsepi64_epi8&expand=1855)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsepi64_epi8&expand=1855)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12739,7 +12739,7 @@ pub unsafe fn _mm_cvtsepi64_epi8(a: __m128i) -> __m128i {
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi64_epi8&expand=1856)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_epi8&expand=1856)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12749,7 +12749,7 @@ pub unsafe fn _mm_mask_cvtsepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsepi64_epi8&expand=1857)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtsepi64_epi8&expand=1857)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -12759,7 +12759,7 @@ pub unsafe fn _mm_maskz_cvtsepi64_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtusepi32_epi16&expand=2054)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi32_epi16&expand=2054)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12773,7 +12773,7 @@ pub unsafe fn _mm512_cvtusepi32_epi16(a: __m512i) -> __m256i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi32_epi16&expand=2055)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_epi16&expand=2055)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12783,7 +12783,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_epi16(src: __m256i, k: __mmask16, a: __m512
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtusepi32_epi16&expand=2056)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi32_epi16&expand=2056)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12797,7 +12797,7 @@ pub unsafe fn _mm512_maskz_cvtusepi32_epi16(k: __mmask16, a: __m512i) -> __m256i
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtusepi32_epi16&expand=2051)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi32_epi16&expand=2051)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12811,7 +12811,7 @@ pub unsafe fn _mm256_cvtusepi32_epi16(a: __m256i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi32_epi16&expand=2052)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_epi16&expand=2052)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12821,7 +12821,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_epi16(src: __m128i, k: __mmask8, a: __m256i
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtusepi32_epi16&expand=2053)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi32_epi16&expand=2053)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12835,7 +12835,7 @@ pub unsafe fn _mm256_maskz_cvtusepi32_epi16(k: __mmask8, a: __m256i) -> __m128i
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtusepi32_epi16&expand=2048)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi32_epi16&expand=2048)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12849,7 +12849,7 @@ pub unsafe fn _mm_cvtusepi32_epi16(a: __m128i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi32_epi16&expand=2049)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_epi16&expand=2049)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12859,7 +12859,7 @@ pub unsafe fn _mm_mask_cvtusepi32_epi16(src: __m128i, k: __mmask8, a: __m128i) -
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtusepi32_epi16&expand=2050)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi32_epi16&expand=2050)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -12873,7 +12873,7 @@ pub unsafe fn _mm_maskz_cvtusepi32_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtusepi32_epi8&expand=2063)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi32_epi8&expand=2063)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
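
The cvtusepi* family is the unsigned counterpart: inputs are treated as unsigned and clamped to the unsigned range of the narrower type. A minimal sketch, same assumptions as above (`usat_demo` is illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn usat_demo() -> __m128i {
    // 300 exceeds u8::MAX, so every output byte saturates to 255. A negative
    // i32 would reinterpret as a huge unsigned value and also clamp to 255.
    let a = _mm512_set1_epi32(300);
    _mm512_cvtusepi32_epi8(a)
}
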
@@ -12887,7 +12887,7 @@ pub unsafe fn _mm512_cvtusepi32_epi8(a: __m512i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi32_epi8&expand=2064)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_epi8&expand=2064)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12897,7 +12897,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_epi8(src: __m128i, k: __mmask16, a: __m512i
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtusepi32_epi8&expand=2065)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi32_epi8&expand=2065)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12907,7 +12907,7 @@ pub unsafe fn _mm512_maskz_cvtusepi32_epi8(k: __mmask16, a: __m512i) -> __m128i
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtusepi32_epi8&expand=2060)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi32_epi8&expand=2060)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12921,7 +12921,7 @@ pub unsafe fn _mm256_cvtusepi32_epi8(a: __m256i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi32_epi8&expand=2061)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_epi8&expand=2061)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12931,7 +12931,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_epi8(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtusepi32_epi8&expand=2062)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi32_epi8&expand=2062)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12945,7 +12945,7 @@ pub unsafe fn _mm256_maskz_cvtusepi32_epi8(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtusepi32_epi8&expand=2057)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi32_epi8&expand=2057)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12959,7 +12959,7 @@ pub unsafe fn _mm_cvtusepi32_epi8(a: __m128i) -> __m128i {
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi32_epi8&expand=2058)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_epi8&expand=2058)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12969,7 +12969,7 @@ pub unsafe fn _mm_mask_cvtusepi32_epi8(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed unsigned 32-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtusepi32_epi8&expand=2059)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi32_epi8&expand=2059)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -12983,7 +12983,7 @@ pub unsafe fn _mm_maskz_cvtusepi32_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtusepi64_epi32&expand=2087)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi64_epi32&expand=2087)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -12997,7 +12997,7 @@ pub unsafe fn _mm512_cvtusepi64_epi32(a: __m512i) -> __m256i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi64_epi32&expand=2088)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_epi32&expand=2088)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13007,7 +13007,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_epi32(src: __m256i, k: __mmask8, a: __m512i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtusepi64_epi32&expand=2089)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi64_epi32&expand=2089)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13021,7 +13021,7 @@ pub unsafe fn _mm512_maskz_cvtusepi64_epi32(k: __mmask8, a: __m512i) -> __m256i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtusepi64_epi32&expand=2084)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi64_epi32&expand=2084)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13035,7 +13035,7 @@ pub unsafe fn _mm256_cvtusepi64_epi32(a: __m256i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi64_epi32&expand=2085)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_epi32&expand=2085)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13045,7 +13045,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_epi32(src: __m128i, k: __mmask8, a: __m256i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtusepi64_epi32&expand=2086)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi64_epi32&expand=2086)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13059,7 +13059,7 @@ pub unsafe fn _mm256_maskz_cvtusepi64_epi32(k: __mmask8, a: __m256i) -> __m128i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtusepi64_epi32&expand=2081)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi64_epi32&expand=2081)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13073,7 +13073,7 @@ pub unsafe fn _mm_cvtusepi64_epi32(a: __m128i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi64_epi32&expand=2082)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_epi32&expand=2082)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13083,7 +13083,7 @@ pub unsafe fn _mm_mask_cvtusepi64_epi32(src: __m128i, k: __mmask8, a: __m128i) -
/// Convert packed unsigned 64-bit integers in a to packed unsigned 32-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtusepi64_epi32&expand=2083)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi64_epi32&expand=2083)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -13097,7 +13097,7 @@ pub unsafe fn _mm_maskz_cvtusepi64_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtusepi64_epi16&expand=2078)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi64_epi16&expand=2078)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13111,7 +13111,7 @@ pub unsafe fn _mm512_cvtusepi64_epi16(a: __m512i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi64_epi16&expand=2079)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_epi16&expand=2079)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13121,7 +13121,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m512i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtusepi64_epi16&expand=2080)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi64_epi16&expand=2080)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13131,7 +13131,7 @@ pub unsafe fn _mm512_maskz_cvtusepi64_epi16(k: __mmask8, a: __m512i) -> __m128i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtusepi64_epi16&expand=2075)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi64_epi16&expand=2075)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13145,7 +13145,7 @@ pub unsafe fn _mm256_cvtusepi64_epi16(a: __m256i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi64_epi16&expand=2076)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_epi16&expand=2076)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13155,7 +13155,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m256i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtusepi64_epi16&expand=2077)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi64_epi16&expand=2077)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13169,7 +13169,7 @@ pub unsafe fn _mm256_maskz_cvtusepi64_epi16(k: __mmask8, a: __m256i) -> __m128i
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtusepi64_epi16&expand=2072)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi64_epi16&expand=2072)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13183,7 +13183,7 @@ pub unsafe fn _mm_cvtusepi64_epi16(a: __m128i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi64_epi16&expand=2073)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_epi16&expand=2073)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13193,7 +13193,7 @@ pub unsafe fn _mm_mask_cvtusepi64_epi16(src: __m128i, k: __mmask8, a: __m128i) -
/// Convert packed unsigned 64-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtusepi64_epi16&expand=2074)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi64_epi16&expand=2074)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -13207,7 +13207,7 @@ pub unsafe fn _mm_maskz_cvtusepi64_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtusepi64_epi8&expand=2096)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtusepi64_epi8&expand=2096)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13221,7 +13221,7 @@ pub unsafe fn _mm512_cvtusepi64_epi8(a: __m512i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi64_epi8&expand=2097)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_epi8&expand=2097)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13231,7 +13231,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m512i)
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtusepi64_epi8&expand=2098)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtusepi64_epi8&expand=2098)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13241,7 +13241,7 @@ pub unsafe fn _mm512_maskz_cvtusepi64_epi8(k: __mmask8, a: __m512i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtusepi64_epi8&expand=2093)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvtusepi64_epi8&expand=2093)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13255,7 +13255,7 @@ pub unsafe fn _mm256_cvtusepi64_epi8(a: __m256i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi64_epi8&expand=2094)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_epi8&expand=2094)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13265,7 +13265,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m256i)
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtusepi64_epi8&expand=2095)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtusepi64_epi8&expand=2095)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13279,7 +13279,7 @@ pub unsafe fn _mm256_maskz_cvtusepi64_epi8(k: __mmask8, a: __m256i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtusepi64_epi8&expand=2090)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtusepi64_epi8&expand=2090)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13293,7 +13293,7 @@ pub unsafe fn _mm_cvtusepi64_epi8(a: __m128i) -> __m128i {
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi64_epi8&expand=2091)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_epi8&expand=2091)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13303,7 +13303,7 @@ pub unsafe fn _mm_mask_cvtusepi64_epi8(src: __m128i, k: __mmask8, a: __m128i) ->
/// Convert packed unsigned 64-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtusepi64_epi8&expand=2092)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtusepi64_epi8&expand=2092)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -13324,7 +13324,7 @@ pub unsafe fn _mm_maskz_cvtusepi64_epi8(k: __mmask8, a: __m128i) -> __m128i {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundps_epi32&expand=1335)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_epi32&expand=1335)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))]
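
The cvt_round* group takes the rounding mode as a const generic, combining one _MM_FROUND_TO_* direction with _MM_FROUND_NO_EXC; the ROUNDING = 8 in the assert_instr above corresponds to _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC. A minimal sketch, same assumptions as above (`round_demo` is illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn round_demo(a: __m512) -> __m512i {
    // Round to nearest even and suppress floating-point exceptions; the
    // rounding mode must be a compile-time constant.
    _mm512_cvt_roundps_epi32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a)
}
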
@@ -13346,7 +13346,7 @@ pub unsafe fn _mm512_cvt_roundps_epi32<const ROUNDING: i32>(a: __m512) -> __m512
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundps_epi32&expand=1336)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_epi32&expand=1336)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))]
@@ -13372,7 +13372,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_epi32<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundps_epi32&expand=1337)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_epi32&expand=1337)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2dq, ROUNDING = 8))]
@@ -13397,7 +13397,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_epi32<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundps_epu32&expand=1341)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_epu32&expand=1341)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))]
@@ -13419,7 +13419,7 @@ pub unsafe fn _mm512_cvt_roundps_epu32<const ROUNDING: i32>(a: __m512) -> __m512
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundps_epu32&expand=1342)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_epu32&expand=1342)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))]
@@ -13445,7 +13445,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_epu32<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundps_epu32&expand=1343)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_epu32&expand=1343)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2udq, ROUNDING = 8))]
@@ -13464,7 +13464,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_epu32<const ROUNDING: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed double-precision (64-bit) floating-point elements, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundps_pd&expand=1347)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_pd&expand=1347)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))]
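
For widening conversions such as ps->pd there is no rounding to control, so the const parameter is only SAE: pass _MM_FROUND_NO_EXC to suppress exceptions, or _MM_FROUND_CUR_DIRECTION to leave them enabled. A minimal sketch, same assumptions as above (`widen_demo` is illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn widen_demo(a: __m256) -> __m512d {
    // f32 -> f64 widening is exact, so only exception suppression matters here.
    _mm512_cvt_roundps_pd::<_MM_FROUND_NO_EXC>(a)
}
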
@@ -13480,7 +13480,7 @@ pub unsafe fn _mm512_cvt_roundps_pd<const SAE: i32>(a: __m256) -> __m512d {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundps_epi32&expand=1336)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_pd&expand=1348)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))]
@@ -13500,7 +13500,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_pd<const SAE: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed double-precision (64-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundps_epi32&expand=1337)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_pd&expand=1349)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2pd, SAE = 8))]
@@ -13522,7 +13522,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_pd<const SAE: i32>(k: __mmask8, a: __m256
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundpd_epi32&expand=1315)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundpd_epi32&expand=1315)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))]
@@ -13544,7 +13544,7 @@ pub unsafe fn _mm512_cvt_roundpd_epi32<const ROUNDING: i32>(a: __m512d) -> __m25
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundpd_epi32&expand=1316)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundpd_epi32&expand=1316)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2dq, ROUNDING = 8))]
@@ -13595,7 +13595,7 @@ pub unsafe fn _mm512_maskz_cvt_roundpd_epi32<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundpd_epu32&expand=1321)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundpd_epu32&expand=1321)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))]
@@ -13617,7 +13617,7 @@ pub unsafe fn _mm512_cvt_roundpd_epu32<const ROUNDING: i32>(a: __m512d) -> __m25
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundpd_epu32&expand=1322)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundpd_epu32&expand=1322)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2udq, ROUNDING = 8))]
@@ -13668,7 +13668,7 @@ pub unsafe fn _mm512_maskz_cvt_roundpd_epu32<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundpd_ps&expand=1327)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundpd_ps&expand=1327)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))]
@@ -13690,7 +13690,7 @@ pub unsafe fn _mm512_cvt_roundpd_ps<const ROUNDING: i32>(a: __m512d) -> __m256 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundpd_ps&expand=1328)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundpd_ps&expand=1328)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))]
@@ -13716,7 +13716,7 @@ pub unsafe fn _mm512_mask_cvt_roundpd_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundpd_ps&expand=1329)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundpd_ps&expand=1329)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtpd2ps, ROUNDING = 8))]
@@ -13738,7 +13738,7 @@ pub unsafe fn _mm512_maskz_cvt_roundpd_ps<const ROUNDING: i32>(k: __mmask8, a: _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi32_ps&expand=1294)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundepi32_ps&expand=1294)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))]
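The integer-to-float direction also honors the embedded rounding mode: i32 to f32 is inexact above 2^24, so the chosen mode is observable. A small sketch (same nightly assumptions, illustrative names):

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn to_f32_toward_pos_inf(x: i32) -> f32 {
    let r = _mm512_cvt_roundepi32_ps::<{ _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC }>(
        _mm512_set1_epi32(x),
    );
    _mm512_cvtss_f32(r)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        // 16_777_217 (2^24 + 1) is not representable in f32; rounding
        // toward +inf selects the next value up rather than the nearest.
        assert_eq!(unsafe { to_f32_toward_pos_inf(16_777_217) }, 16_777_218.0);
    }
}
```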
@@ -13759,7 +13759,7 @@ pub unsafe fn _mm512_cvt_roundepi32_ps<const ROUNDING: i32>(a: __m512i) -> __m51
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi32_ps&expand=1295)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundepi32_ps&expand=1295)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))]
@@ -13784,7 +13784,7 @@ pub unsafe fn _mm512_mask_cvt_roundepi32_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ps&expand=1296)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundepi32_ps&expand=1296)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtdq2ps, ROUNDING = 8))]
@@ -13809,7 +13809,7 @@ pub unsafe fn _mm512_maskz_cvt_roundepi32_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu32_ps&expand=1303)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundepu32_ps&expand=1303)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))]
@@ -13830,7 +13830,7 @@ pub unsafe fn _mm512_cvt_roundepu32_ps<const ROUNDING: i32>(a: __m512i) -> __m51
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu32_ps&expand=1304)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundepu32_ps&expand=1304)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))]
@@ -13855,7 +13855,7 @@ pub unsafe fn _mm512_mask_cvt_roundepu32_ps<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu32_ps&expand=1305)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundepu32_ps&expand=1305)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtudq2ps, ROUNDING = 8))]
@@ -13874,7 +13874,7 @@ pub unsafe fn _mm512_maskz_cvt_roundepu32_ps<const ROUNDING: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) floating-point elements, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundps_ph&expand=1354)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundps_ph&expand=1354)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))]
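For the ps to ph family the result packs IEEE binary16 bit patterns into a `__m256i`. A minimal sketch (same nightly assumptions):

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn to_half_bits(a: __m512) -> [u16; 16] {
    let r = _mm512_cvt_roundps_ph::<{ _MM_FROUND_NO_EXC }>(a);
    core::mem::transmute::<__m256i, [u16; 16]>(r)
}

// 1.0f32 narrows to the binary16 pattern 0x3C00:
// unsafe { to_half_bits(_mm512_set1_ps(1.0)) } == [0x3C00u16; 16]
```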
@@ -13890,7 +13890,7 @@ pub unsafe fn _mm512_cvt_roundps_ph<const SAE: i32>(a: __m512) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundps_ph&expand=1355)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundps_ph&expand=1355)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))]
@@ -13910,7 +13910,7 @@ pub unsafe fn _mm512_mask_cvt_roundps_ph<const SAE: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundps_ph&expand=1356)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundps_ph&expand=1356)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))]
@@ -13931,7 +13931,7 @@ pub unsafe fn _mm512_maskz_cvt_roundps_ph<const SAE: i32>(k: __mmask16, a: __m51
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvt_roundps_ph&expand=1352)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvt_roundps_ph&expand=1352)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
@@ -13941,7 +13941,7 @@ pub unsafe fn _mm256_mask_cvt_roundps_ph<const IMM8: i32>(
k: __mmask8,
a: __m256,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let src = src.as_i16x8();
let r = vcvtps2ph256(a, IMM8, src, k);
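The other change running through these hunks is the rename from `static_assert_imm8!` to the width-parameterized `static_assert_uimm_bits!`. The macro is internal to stdarch; the following free-standing check is an assumed, illustrative equivalent of what `static_assert_uimm_bits!(IMM8, 8)` enforces, not the crate's actual expansion:

```rust
// Hypothetical stand-in for the stdarch-internal macro: the immediate
// must fit in the given number of unsigned bits.
const fn fits_uimm_bits(imm: i32, bits: u32) -> bool {
    imm >= 0 && (imm as u64) < (1u64 << bits)
}

fn main() {
    assert!(fits_uimm_bits(8, 8)); // _MM_FROUND_NO_EXC (= 8) is in range
    assert!(!fits_uimm_bits(256, 8)); // needs 9 bits; the real macro rejects this at compile time
}
```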
@@ -13956,13 +13956,13 @@ pub unsafe fn _mm256_mask_cvt_roundps_ph<const IMM8: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvt_roundps_ph&expand=1353)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvt_roundps_ph&expand=1353)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_cvt_roundps_ph<const IMM8: i32>(k: __mmask8, a: __m256) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let zero = _mm_setzero_si128().as_i16x8();
let r = vcvtps2ph256(a, IMM8, zero, k);
@@ -13977,7 +13977,7 @@ pub unsafe fn _mm256_maskz_cvt_roundps_ph<const IMM8: i32>(k: __mmask8, a: __m25
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundps_ph&expand=1350)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvt_roundps_ph&expand=1350)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
@@ -13987,7 +13987,7 @@ pub unsafe fn _mm_mask_cvt_roundps_ph<const IMM8: i32>(
k: __mmask8,
a: __m128,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let src = src.as_i16x8();
let r = vcvtps2ph128(a, IMM8, src, k);
@@ -14002,13 +14002,13 @@ pub unsafe fn _mm_mask_cvt_roundps_ph<const IMM8: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundps_ph&expand=1351)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvt_roundps_ph&expand=1351)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_cvt_roundps_ph<const IMM8: i32>(k: __mmask8, a: __m128) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let zero = _mm_setzero_si128().as_i16x8();
let r = vcvtps2ph128(a, IMM8, zero, k);
@@ -14018,7 +14018,7 @@ pub unsafe fn _mm_maskz_cvt_roundps_ph<const IMM8: i32>(k: __mmask8, a: __m128)
/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) floating-point elements, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtps_ph&expand=1778)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtps_ph&expand=1778)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))]
@@ -14034,7 +14034,7 @@ pub unsafe fn _mm512_cvtps_ph<const SAE: i32>(a: __m512) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtps_ph&expand=1779)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtps_ph&expand=1779)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))]
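The mask/maskz pairs here follow the convention spelled out in the doc comments: writemask lanes fall back to `src`, zeromask lanes to zero. A sketch of the writemask form (same nightly assumptions; the sentinel value is arbitrary):

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn masked_to_half(a: __m512) -> [u16; 16] {
    let src = _mm256_set1_epi16(0x7FFF); // sentinel for untouched lanes
    let k: __mmask16 = 0b0000_0000_1111_1111; // convert the low 8 lanes
    let r = _mm512_mask_cvtps_ph::<{ _MM_FROUND_NO_EXC }>(src, k, a);
    core::mem::transmute::<__m256i, [u16; 16]>(r)
}

// With a = _mm512_set1_ps(1.0): lanes 0..8 hold 0x3C00, lanes 8..16
// keep the 0x7FFF sentinel from src.
```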
@@ -14054,7 +14054,7 @@ pub unsafe fn _mm512_mask_cvtps_ph<const SAE: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtps_ph&expand=1780)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtps_ph&expand=1780)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtps2ph, SAE = 8))]
@@ -14075,7 +14075,7 @@ pub unsafe fn _mm512_maskz_cvtps_ph<const SAE: i32>(k: __mmask16, a: __m512) ->
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtps_ph&expand=1776)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtps_ph&expand=1776)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
@@ -14085,7 +14085,7 @@ pub unsafe fn _mm256_mask_cvtps_ph<const IMM8: i32>(
k: __mmask8,
a: __m256,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let src = src.as_i16x8();
let r = vcvtps2ph256(a, IMM8, src, k);
@@ -14100,13 +14100,13 @@ pub unsafe fn _mm256_mask_cvtps_ph<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtps_ph&expand=1777)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtps_ph&expand=1777)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_cvtps_ph<const IMM8: i32>(k: __mmask8, a: __m256) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x8();
let zero = _mm_setzero_si128().as_i16x8();
let r = vcvtps2ph256(a, IMM8, zero, k);
@@ -14121,13 +14121,13 @@ pub unsafe fn _mm256_maskz_cvtps_ph<const IMM8: i32>(k: __mmask8, a: __m256) ->
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtps_ph&expand=1773)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtps_ph&expand=1773)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_mask_cvtps_ph<const IMM8: i32>(src: __m128i, k: __mmask8, a: __m128) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let src = src.as_i16x8();
let r = vcvtps2ph128(a, IMM8, src, k);
@@ -14142,13 +14142,13 @@ pub unsafe fn _mm_mask_cvtps_ph<const IMM8: i32>(src: __m128i, k: __mmask8, a: _
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtps_ph&expand=1774)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtps_ph&expand=1774)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtps2ph, IMM8 = 8))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_cvtps_ph<const IMM8: i32>(k: __mmask8, a: __m128) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let zero = _mm_setzero_si128().as_i16x8();
let r = vcvtps2ph128(a, IMM8, zero, k);
@@ -14158,7 +14158,7 @@ pub unsafe fn _mm_maskz_cvtps_ph<const IMM8: i32>(k: __mmask8, a: __m128) -> __m
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_ps&expand=1332)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvt_roundph_ps&expand=1332)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))]
@@ -14174,7 +14174,7 @@ pub unsafe fn _mm512_cvt_roundph_ps<const SAE: i32>(a: __m256i) -> __m512 {
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_ps&expand=1333)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvt_roundph_ps&expand=1333)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))]
@@ -14194,7 +14194,7 @@ pub unsafe fn _mm512_mask_cvt_roundph_ps<const SAE: i32>(
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_ps&expand=1334)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvt_roundph_ps&expand=1334)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtph2ps, SAE = 8))]
@@ -14209,7 +14209,7 @@ pub unsafe fn _mm512_maskz_cvt_roundph_ps<const SAE: i32>(k: __mmask16, a: __m25
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_ps&expand=1723)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtph_ps&expand=1723)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
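Since binary16 to binary32 widening is exact, pairing the ps-to-ph conversion with `_mm512_cvtph_ps` makes a convenient round trip in which only the narrowing step rounds. A sketch under the same nightly assumptions:

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn through_half(x: f32) -> f32 {
    let ph = _mm512_cvt_roundps_ph::<{ _MM_FROUND_NO_EXC }>(_mm512_set1_ps(x));
    _mm512_cvtss_f32(_mm512_cvtph_ps(ph))
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        assert_eq!(unsafe { through_half(1.0) }, 1.0); // exactly representable
        assert_ne!(unsafe { through_half(0.1) }, 0.1); // 0.1 is not; it rounds
    }
}
```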
@@ -14224,7 +14224,7 @@ pub unsafe fn _mm512_cvtph_ps(a: __m256i) -> __m512 {
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_ps&expand=1724)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtph_ps&expand=1724)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
@@ -14239,7 +14239,7 @@ pub unsafe fn _mm512_mask_cvtph_ps(src: __m512, k: __mmask16, a: __m256i) -> __m
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_ps&expand=1725)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtph_ps&expand=1725)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
@@ -14254,7 +14254,7 @@ pub unsafe fn _mm512_maskz_cvtph_ps(k: __mmask16, a: __m256i) -> __m512 {
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_ps&expand=1721)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtph_ps&expand=1721)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
@@ -14265,7 +14265,7 @@ pub unsafe fn _mm256_mask_cvtph_ps(src: __m256, k: __mmask8, a: __m128i) -> __m2
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_ps&expand=1722)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvtph_ps&expand=1722)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
@@ -14277,7 +14277,7 @@ pub unsafe fn _mm256_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m256 {
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_ps&expand=1718)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtph_ps&expand=1718)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
@@ -14288,7 +14288,7 @@ pub unsafe fn _mm_mask_cvtph_ps(src: __m128, k: __mmask8, a: __m128i) -> __m128
/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_ps&expand=1719)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvtph_ps&expand=1719)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvtph2ps))]
@@ -14301,7 +14301,7 @@ pub unsafe fn _mm_maskz_cvtph_ps(k: __mmask8, a: __m128i) -> __m128 {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundps_epi32&expand=1916)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundps_epi32&expand=1916)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))]
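The `cvtt_*` group always truncates (rounds toward zero) regardless of MXCSR; only exception suppression is configurable via the SAE parameter. A sketch:

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn truncate_ps(a: __m512) -> [i32; 16] {
    let r = _mm512_cvtt_roundps_epi32::<{ _MM_FROUND_NO_EXC }>(a);
    core::mem::transmute::<__m512i, [i32; 16]>(r)
}

// Truncation rounds toward zero, so -1.7 becomes -1 (not -2):
// unsafe { truncate_ps(_mm512_set1_ps(-1.7)) } == [-1i32; 16]
```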
@@ -14317,7 +14317,7 @@ pub unsafe fn _mm512_cvtt_roundps_epi32<const SAE: i32>(a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundps_epi32&expand=1917)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundps_epi32&expand=1917)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))]
@@ -14337,7 +14337,7 @@ pub unsafe fn _mm512_mask_cvtt_roundps_epi32<const SAE: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundps_epi32&expand=1918)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundps_epi32&expand=1918)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2dq, SAE = 8))]
@@ -14353,7 +14353,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundps_epi32<const SAE: i32>(k: __mmask16, a: _
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundps_epu32&expand=1922)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundps_epu32&expand=1922)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))]
@@ -14369,7 +14369,7 @@ pub unsafe fn _mm512_cvtt_roundps_epu32<const SAE: i32>(a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundps_epu32&expand=1923)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundps_epu32&expand=1923)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))]
@@ -14389,7 +14389,7 @@ pub unsafe fn _mm512_mask_cvtt_roundps_epu32<const SAE: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundps_epu32&expand=1924)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundps_epu32&expand=1924)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2udq, SAE = 8))]
@@ -14405,7 +14405,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundps_epu32<const SAE: i32>(k: __mmask16, a: _
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundpd_epi32&expand=1904)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundpd_epi32&expand=1904)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))]
@@ -14421,7 +14421,7 @@ pub unsafe fn _mm512_cvtt_roundpd_epi32<const SAE: i32>(a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundpd_epi32&expand=1905)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundpd_epi32&expand=1905)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))]
@@ -14441,7 +14441,7 @@ pub unsafe fn _mm512_mask_cvtt_roundpd_epi32<const SAE: i32>(
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundps_epi32&expand=1918)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundpd_epi32&expand=1918)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2dq, SAE = 8))]
@@ -14457,7 +14457,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundpd_epi32<const SAE: i32>(k: __mmask8, a: __
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundpd_epu32&expand=1910)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtt_roundpd_epu32&expand=1910)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))]
@@ -14473,7 +14473,7 @@ pub unsafe fn _mm512_cvtt_roundpd_epu32<const SAE: i32>(a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundpd_epu32&expand=1911)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtt_roundpd_epu32&expand=1911)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))]
@@ -14492,7 +14492,7 @@ pub unsafe fn _mm512_mask_cvtt_roundpd_epu32<const SAE: i32>(
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttps_epi32&expand=1984)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttps_epi32&expand=1984)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -14507,7 +14507,7 @@ pub unsafe fn _mm512_cvttps_epi32(a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttps_epi32&expand=1985)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttps_epi32&expand=1985)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -14522,7 +14522,7 @@ pub unsafe fn _mm512_mask_cvttps_epi32(src: __m512i, k: __mmask16, a: __m512) ->
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttps_epi32&expand=1986)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttps_epi32&expand=1986)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
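A zeromask sketch for the non-`round` truncating form (a clear mask bit zeroes the lane), under the same nightly assumptions:

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn zeromask_trunc(a: __m512) -> [i32; 16] {
    let k: __mmask16 = 0b0101_0101_0101_0101; // even lanes only
    let r = _mm512_maskz_cvttps_epi32(k, a);
    core::mem::transmute::<__m512i, [i32; 16]>(r)
}

// With a = _mm512_set1_ps(3.9): even lanes hold 3, odd lanes are zeroed.
```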
@@ -14537,7 +14537,7 @@ pub unsafe fn _mm512_maskz_cvttps_epi32(k: __mmask16, a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttps_epi32&expand=1982)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttps_epi32&expand=1982)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -14547,7 +14547,7 @@ pub unsafe fn _mm256_mask_cvttps_epi32(src: __m256i, k: __mmask8, a: __m256) ->
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttps_epi32&expand=1983)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttps_epi32&expand=1983)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -14561,7 +14561,7 @@ pub unsafe fn _mm256_maskz_cvttps_epi32(k: __mmask8, a: __m256) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttps_epi32&expand=1979)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttps_epi32&expand=1979)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -14571,7 +14571,7 @@ pub unsafe fn _mm_mask_cvttps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m
/// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttps_epi32&expand=1980)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttps_epi32&expand=1980)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2dq))]
@@ -14585,7 +14585,7 @@ pub unsafe fn _mm_maskz_cvttps_epi32(k: __mmask8, a: __m128) -> __m128i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttps_epu32&expand=2002)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttps_epu32&expand=2002)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14600,7 +14600,7 @@ pub unsafe fn _mm512_cvttps_epu32(a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttps_epu32&expand=2003)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttps_epu32&expand=2003)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14615,7 +14615,7 @@ pub unsafe fn _mm512_mask_cvttps_epu32(src: __m512i, k: __mmask16, a: __m512) ->
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttps_epu32&expand=2004)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttps_epu32&expand=2004)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14630,7 +14630,7 @@ pub unsafe fn _mm512_maskz_cvttps_epu32(k: __mmask16, a: __m512) -> __m512i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttps_epu32&expand=1999)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvttps_epu32&expand=1999)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14644,7 +14644,7 @@ pub unsafe fn _mm256_cvttps_epu32(a: __m256) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttps_epu32&expand=2000)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttps_epu32&expand=2000)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14654,7 +14654,7 @@ pub unsafe fn _mm256_mask_cvttps_epu32(src: __m256i, k: __mmask8, a: __m256) ->
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttps_epu32&expand=2001)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttps_epu32&expand=2001)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14668,7 +14668,7 @@ pub unsafe fn _mm256_maskz_cvttps_epu32(k: __mmask8, a: __m256) -> __m256i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttps_epu32&expand=1996)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_epu32&expand=1996)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14682,7 +14682,7 @@ pub unsafe fn _mm_cvttps_epu32(a: __m128) -> __m128i {
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttps_epu32&expand=1997)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttps_epu32&expand=1997)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14692,7 +14692,7 @@ pub unsafe fn _mm_mask_cvttps_epu32(src: __m128i, k: __mmask8, a: __m128) -> __m
/// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttps_epu32&expand=1998)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttps_epu32&expand=1998)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttps2udq))]
@@ -14707,7 +14707,7 @@ pub unsafe fn _mm_maskz_cvttps_epu32(k: __mmask8, a: __m128) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundpd_epu32&expand=1912)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvtt_roundpd_epu32&expand=1912)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2udq, SAE = 8))]
@@ -14722,7 +14722,7 @@ pub unsafe fn _mm512_maskz_cvtt_roundpd_epu32<const SAE: i32>(k: __mmask8, a: __
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttpd_epi32&expand=1947)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttpd_epi32&expand=1947)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
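The f64 source halves the lane count, so the 512-bit input truncates into a 256-bit integer result. A sketch (same nightly assumptions):

```rust
#![feature(stdsimd)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn truncate_pd(a: __m512d) -> [i32; 8] {
    // Eight f64 lanes narrow to eight i32 lanes in a __m256i.
    let r = _mm512_cvttpd_epi32(a);
    core::mem::transmute::<__m256i, [i32; 8]>(r)
}

// unsafe { truncate_pd(_mm512_set1_pd(2.9)) } == [2i32; 8]
```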
@@ -14737,7 +14737,7 @@ pub unsafe fn _mm512_cvttpd_epi32(a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttpd_epi32&expand=1948)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttpd_epi32&expand=1948)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -14752,7 +14752,7 @@ pub unsafe fn _mm512_mask_cvttpd_epi32(src: __m256i, k: __mmask8, a: __m512d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttpd_epi32&expand=1949)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttpd_epi32&expand=1949)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -14767,7 +14767,7 @@ pub unsafe fn _mm512_maskz_cvttpd_epi32(k: __mmask8, a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttpd_epi32&expand=1945)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttpd_epi32&expand=1945)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -14777,7 +14777,7 @@ pub unsafe fn _mm256_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m256d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttpd_epi32&expand=1946)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttpd_epi32&expand=1946)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -14791,7 +14791,7 @@ pub unsafe fn _mm256_maskz_cvttpd_epi32(k: __mmask8, a: __m256d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttpd_epi32&expand=1942)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttpd_epi32&expand=1942)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -14801,7 +14801,7 @@ pub unsafe fn _mm_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m128d) -> __
/// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttpd_epi32&expand=1943)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttpd_epi32&expand=1943)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2dq))]
@@ -14815,7 +14815,7 @@ pub unsafe fn _mm_maskz_cvttpd_epi32(k: __mmask8, a: __m128d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttpd_epu32&expand=1965)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvttpd_epu32&expand=1965)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14830,7 +14830,7 @@ pub unsafe fn _mm512_cvttpd_epu32(a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttpd_epu32&expand=1966)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvttpd_epu32&expand=1966)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14845,7 +14845,7 @@ pub unsafe fn _mm512_mask_cvttpd_epu32(src: __m256i, k: __mmask8, a: __m512d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttpd_epu32&expand=1967)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_cvttpd_epu32&expand=1967)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14860,7 +14860,7 @@ pub unsafe fn _mm512_maskz_cvttpd_epu32(k: __mmask8, a: __m512d) -> __m256i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttpd_epu32&expand=1962)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cvttpd_epu32&expand=1962)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14874,7 +14874,7 @@ pub unsafe fn _mm256_cvttpd_epu32(a: __m256d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttpd_epu32&expand=1963)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvttpd_epu32&expand=1963)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14884,7 +14884,7 @@ pub unsafe fn _mm256_mask_cvttpd_epu32(src: __m128i, k: __mmask8, a: __m256d) ->
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttpd_epu32&expand=1964)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_cvttpd_epu32&expand=1964)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14898,7 +14898,7 @@ pub unsafe fn _mm256_maskz_cvttpd_epu32(k: __mmask8, a: __m256d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttpd_epu32&expand=1959)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epu32&expand=1959)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14912,7 +14912,7 @@ pub unsafe fn _mm_cvttpd_epu32(a: __m128d) -> __m128i {
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttpd_epu32&expand=1960)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvttpd_epu32&expand=1960)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14922,7 +14922,7 @@ pub unsafe fn _mm_mask_cvttpd_epu32(src: __m128i, k: __mmask8, a: __m128d) -> __
/// Convert packed double-precision (64-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttpd_epu32&expand=1961)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_cvttpd_epu32&expand=1961)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcvttpd2udq))]
@@ -14936,7 +14936,7 @@ pub unsafe fn _mm_maskz_cvttpd_epu32(k: __mmask8, a: __m128d) -> __m128i {
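A minimal usage sketch of the maskz truncating conversion (a hypothetical helper, not part of this patch; it assumes a nightly toolchain where these unstable AVX-512 intrinsics are available and a CPU supporting avx512f and avx512vl):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn truncate_maskz_sketch() {
    let a = _mm_set_pd(9.7, 3.9);            // lanes: [3.9, 9.7]
    let r = _mm_maskz_cvttpd_epu32(0b01, a); // keep lane 0, zero lane 1
    let out: [u32; 4] = core::mem::transmute(r);
    assert_eq!(out, [3, 0, 0, 0]);           // 3.9 truncates toward zero
}
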
/// Returns vector of type `__m512d` with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_pd&expand=5018)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_pd&expand=5018)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -14947,7 +14947,7 @@ pub unsafe fn _mm512_setzero_pd() -> __m512d {
/// Returns vector of type `__m512` with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_ps&expand=5021)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_ps&expand=5021)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -14958,7 +14958,7 @@ pub unsafe fn _mm512_setzero_ps() -> __m512 {
/// Returns vector of type `__m512` with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero&expand=5014)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero&expand=5014)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -14969,7 +14969,7 @@ pub unsafe fn _mm512_setzero() -> __m512 {
/// Returns vector of type `__m512i` with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_si512&expand=5024)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_si512&expand=5024)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -14980,7 +14980,7 @@ pub unsafe fn _mm512_setzero_si512() -> __m512i {
/// Returns vector of type `__m512i` with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_epi32&expand=5015)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setzero_epi32&expand=5015)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vxorps))]
@@ -14992,7 +14992,7 @@ pub unsafe fn _mm512_setzero_epi32() -> __m512i {
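The setzero intrinsics differ only in how the zeroed 512-bit register is typed, which a short sketch makes visible (same nightly/avx512f assumptions as above):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn zeroed_sketch() -> (__m512, __m512d, __m512i) {
    // All of these lower to the same register-zeroing idiom (hence the
    // shared vxorps assertion above); only the element view differs.
    (_mm512_setzero_ps(), _mm512_setzero_pd(), _mm512_setzero_si512())
}
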
/// Sets packed 32-bit integers in `dst` with the supplied values in reverse
/// order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_epi32&expand=4991)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_epi32&expand=4991)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr_epi32(
@@ -15021,7 +15021,7 @@ pub unsafe fn _mm512_setr_epi32(
/// Set packed 8-bit integers in dst with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_epi8&expand=4915)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi8&expand=4915)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set_epi8(
@@ -15101,7 +15101,7 @@ pub unsafe fn _mm512_set_epi8(
/// Set packed 16-bit integers in dst with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_epi16&expand=4905)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi16&expand=4905)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set_epi16(
@@ -15147,7 +15147,7 @@ pub unsafe fn _mm512_set_epi16(
/// Set packed 32-bit integers in dst with the repeated 4 element sequence.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set4_epi32&expand=4982)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_epi32&expand=4982)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i {
@@ -15156,7 +15156,7 @@ pub unsafe fn _mm512_set4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i {
/// Set packed single-precision (32-bit) floating-point elements in dst with the repeated 4 element sequence.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set4_ps&expand=4985)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_ps&expand=4985)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 {
@@ -15165,7 +15165,7 @@ pub unsafe fn _mm512_set4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 {
/// Set packed double-precision (64-bit) floating-point elements in dst with the repeated 4 element sequence.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set4_pd&expand=4984)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_pd&expand=4984)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d {
@@ -15174,7 +15174,7 @@ pub unsafe fn _mm512_set4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d {
/// Set packed 32-bit integers in dst with the repeated 4 element sequence in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr4_epi32&expand=5009)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_epi32&expand=5009)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i {
@@ -15183,7 +15183,7 @@ pub unsafe fn _mm512_setr4_epi32(d: i32, c: i32, b: i32, a: i32) -> __m512i {
/// Set packed single-precision (32-bit) floating-point elements in dst with the repeated 4 element sequence in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr4_ps&expand=5012)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_ps&expand=5012)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 {
@@ -15192,7 +15192,7 @@ pub unsafe fn _mm512_setr4_ps(d: f32, c: f32, b: f32, a: f32) -> __m512 {
/// Set packed double-precision (64-bit) floating-point elements in dst with the repeated 4 element sequence in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr4_pd&expand=5011)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_pd&expand=5011)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d {
@@ -15201,7 +15201,7 @@ pub unsafe fn _mm512_setr4_pd(d: f64, c: f64, b: f64, a: f64) -> __m512d {
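The lane ordering of set4 versus setr4 is easy to get backwards; a hypothetical sketch (same assumptions as above) pins it down:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn set4_order_sketch() {
    // set4 puts its *last* argument in the lowest lane and repeats the
    // four-element sequence; setr4 repeats the arguments in given order.
    let v: [i32; 16] = core::mem::transmute(_mm512_set4_epi32(4, 3, 2, 1));
    let r: [i32; 16] = core::mem::transmute(_mm512_setr4_epi32(4, 3, 2, 1));
    assert_eq!(&v[..4], &[1, 2, 3, 4]);
    assert_eq!(&r[..4], &[4, 3, 2, 1]);
}
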
/// Set packed 64-bit integers in dst with the supplied values.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_epi64&expand=4910)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi64&expand=4910)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set_epi64(
@@ -15219,7 +15219,7 @@ pub unsafe fn _mm512_set_epi64(
/// Set packed 64-bit integers in dst with the supplied values in reverse order.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_epi64&expand=4993)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_epi64&expand=4993)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr_epi64(
@@ -15238,7 +15238,7 @@ pub unsafe fn _mm512_setr_epi64(
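The same highest-lane-first convention applies to the 64-bit set variants; a brief sketch (same assumptions):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn epi64_order_sketch() {
    // set_epi64 takes lanes highest-first, so the last argument is lane 0;
    // setr_epi64 takes them in memory (lowest-first) order instead.
    let v: [i64; 8] = core::mem::transmute(_mm512_set_epi64(7, 6, 5, 4, 3, 2, 1, 0));
    assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7]);
}
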
/// Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32gather_pd&expand=3002)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_pd&expand=3002)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))]
@@ -15255,7 +15255,7 @@ pub unsafe fn _mm512_i32gather_pd<const SCALE: i32>(offsets: __m256i, slice: *co
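A gather sketch using the (offsets, slice) parameter order visible in the hunk context above (hypothetical helper; same nightly assumptions):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn gather_every_other_sketch(table: &[f64]) -> __m512d {
    assert!(table.len() > 14);
    // SCALE = 8 turns each 32-bit index into a byte offset, so index i
    // addresses table[i]; here we pull every other element.
    let idx = _mm256_setr_epi32(0, 2, 4, 6, 8, 10, 12, 14);
    _mm512_i32gather_pd::<8>(idx, table.as_ptr() as *const u8)
}
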
/// Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32gather_pd&expand=3003)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_pd&expand=3003)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherdpd, SCALE = 1))]
@@ -15276,7 +15276,7 @@ pub unsafe fn _mm512_mask_i32gather_pd<const SCALE: i32>(
/// Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64gather_pd&expand=3092)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_pd&expand=3092)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
@@ -15293,7 +15293,7 @@ pub unsafe fn _mm512_i64gather_pd<const SCALE: i32>(offsets: __m512i, slice: *co
/// Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64gather_pd&expand=3093)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_pd&expand=3093)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherqpd, SCALE = 1))]
@@ -15314,7 +15314,7 @@ pub unsafe fn _mm512_mask_i64gather_pd<const SCALE: i32>(
/// Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64gather_ps&expand=3100)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_ps&expand=3100)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
@@ -15331,7 +15331,7 @@ pub unsafe fn _mm512_i64gather_ps<const SCALE: i32>(offsets: __m512i, slice: *co
/// Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64gather_ps&expand=3101)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_ps&expand=3101)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherqps, SCALE = 1))]
@@ -15352,7 +15352,7 @@ pub unsafe fn _mm512_mask_i64gather_ps<const SCALE: i32>(
/// Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32gather_ps&expand=3010)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_ps&expand=3010)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))]
@@ -15369,7 +15369,7 @@ pub unsafe fn _mm512_i32gather_ps<const SCALE: i32>(offsets: __m512i, slice: *co
/// Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32gather_ps&expand=3011)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_ps&expand=3011)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgatherdps, SCALE = 1))]
@@ -15390,7 +15390,7 @@ pub unsafe fn _mm512_mask_i32gather_ps<const SCALE: i32>(
/// Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32gather_epi32&expand=2986)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_epi32&expand=2986)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))]
@@ -15410,7 +15410,7 @@ pub unsafe fn _mm512_i32gather_epi32<const SCALE: i32>(
/// Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32gather_epi32&expand=2987)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_epi32&expand=2987)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherdd, SCALE = 1))]
@@ -15432,7 +15432,7 @@ pub unsafe fn _mm512_mask_i32gather_epi32<const SCALE: i32>(
/// Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32gather_epi64&expand=2994)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32gather_epi64&expand=2994)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))]
@@ -15452,7 +15452,7 @@ pub unsafe fn _mm512_i32gather_epi64<const SCALE: i32>(
/// Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32gather_epi64&expand=2995)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32gather_epi64&expand=2995)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherdq, SCALE = 1))]
@@ -15474,7 +15474,7 @@ pub unsafe fn _mm512_mask_i32gather_epi64<const SCALE: i32>(
/// Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64gather_epi64&expand=3084)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_epi64&expand=3084)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))]
@@ -15494,7 +15494,7 @@ pub unsafe fn _mm512_i64gather_epi64<const SCALE: i32>(
/// Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64gather_epi64&expand=3085)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_epi64&expand=3085)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherqq, SCALE = 1))]
@@ -15516,7 +15516,7 @@ pub unsafe fn _mm512_mask_i64gather_epi64<const SCALE: i32>(
/// Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst. scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64gather_epi32&expand=3074)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64gather_epi32&expand=3074)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))]
@@ -15536,7 +15536,7 @@ pub unsafe fn _mm512_i64gather_epi32<const SCALE: i32>(
/// Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). Gathered elements are merged into dst using writemask k (elements are copied from src when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64gather_epi32&expand=3075)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64gather_epi32&expand=3075)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpgatherqd, SCALE = 1))]
@@ -15558,7 +15558,7 @@ pub unsafe fn _mm512_mask_i64gather_epi32<const SCALE: i32>(
/// Scatter double-precision (64-bit) floating-point elements from a into memory using 32-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32scatter_pd&expand=3044)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_pd&expand=3044)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))]
@@ -15578,7 +15578,7 @@ pub unsafe fn _mm512_i32scatter_pd<const SCALE: i32>(
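A matching scatter sketch, assuming stdarch's (slice, offsets, src) parameter order for this intrinsic (hypothetical helper; same nightly assumptions):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn scatter_every_other_sketch(dst: &mut [f64; 16], src: __m512d) {
    // Inverse of the gather sketch above: write the eight lanes of `src`
    // to dst[0], dst[2], ..., dst[14] (SCALE = 8 => index * 8 bytes).
    let idx = _mm256_setr_epi32(0, 2, 4, 6, 8, 10, 12, 14);
    _mm512_i32scatter_pd::<8>(dst.as_mut_ptr() as *mut u8, idx, src);
}
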
/// Scatter double-precision (64-bit) floating-point elements from a into memory using 32-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32scatter_pd&expand=3045)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_pd&expand=3045)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterdpd, SCALE = 1))]
@@ -15598,7 +15598,7 @@ pub unsafe fn _mm512_mask_i32scatter_pd<const SCALE: i32>(
/// Scatter double-precision (64-bit) floating-point elements from a into memory using 64-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64scatter_pd&expand=3122)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_pd&expand=3122)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))]
@@ -15618,7 +15618,7 @@ pub unsafe fn _mm512_i64scatter_pd<const SCALE: i32>(
/// Scatter double-precision (64-bit) floating-point elements from a into memory using 64-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64scatter_pd&expand=3123)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_pd&expand=3123)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterqpd, SCALE = 1))]
@@ -15638,7 +15638,7 @@ pub unsafe fn _mm512_mask_i64scatter_pd<const SCALE: i32>(
/// Scatter single-precision (32-bit) floating-point elements from a into memory using 32-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32scatter_ps&expand=3050)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_ps&expand=3050)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))]
@@ -15658,7 +15658,7 @@ pub unsafe fn _mm512_i32scatter_ps<const SCALE: i32>(
/// Scatter single-precision (32-bit) floating-point elements from a into memory using 32-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32scatter_ps&expand=3051)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_ps&expand=3051)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterdps, SCALE = 1))]
@@ -15678,7 +15678,7 @@ pub unsafe fn _mm512_mask_i32scatter_ps<const SCALE: i32>(
/// Scatter single-precision (32-bit) floating-point elements from a into memory using 64-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64scatter_ps&expand=3128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_ps&expand=3128)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))]
@@ -15698,7 +15698,7 @@ pub unsafe fn _mm512_i64scatter_ps<const SCALE: i32>(
/// Scatter single-precision (32-bit) floating-point elements from a into memory using 64-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64scatter_ps&expand=3129)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_ps&expand=3129)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscatterqps, SCALE = 1))]
@@ -15718,7 +15718,7 @@ pub unsafe fn _mm512_mask_i64scatter_ps<const SCALE: i32>(
/// Scatter 64-bit integers from a into memory using 32-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32scatter_epi64&expand=3038)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_epi64&expand=3038)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))]
@@ -15738,7 +15738,7 @@ pub unsafe fn _mm512_i32scatter_epi64<const SCALE: i32>(
/// Scatter 64-bit integers from a into memory using 32-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32scatter_epi64&expand=3039)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_epi64&expand=3039)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterdq, SCALE = 1))]
@@ -15759,7 +15759,7 @@ pub unsafe fn _mm512_mask_i32scatter_epi64<const SCALE: i32>(
/// Scatter 64-bit integers from a into memory using 64-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64scatter_epi64&expand=3116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_epi64&expand=3116)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))]
@@ -15779,7 +15779,7 @@ pub unsafe fn _mm512_i64scatter_epi64<const SCALE: i32>(
/// Scatter 64-bit integers from a into memory using 64-bit indices. 64-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64scatter_epi64&expand=3117)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_epi64&expand=3117)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterqq, SCALE = 1))]
@@ -15800,7 +15800,7 @@ pub unsafe fn _mm512_mask_i64scatter_epi64<const SCALE: i32>(
/// Scatter 32-bit integers from a into memory using 32-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i32scatter_epi32&expand=3032)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i32scatter_epi32&expand=3032)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))]
@@ -15820,7 +15820,7 @@ pub unsafe fn _mm512_i32scatter_epi32<const SCALE: i32>(
/// Scatter 32-bit integers from a into memory using 32-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 32-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i32scatter_epi32&expand=3033)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i32scatter_epi32&expand=3033)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterdd, SCALE = 1))]
@@ -15841,7 +15841,7 @@ pub unsafe fn _mm512_mask_i32scatter_epi32<const SCALE: i32>(
/// Scatter 32-bit integers from a into memory using 64-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_i64scatter_epi32&expand=3108)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_i64scatter_epi32&expand=3108)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))]
@@ -15861,7 +15861,7 @@ pub unsafe fn _mm512_i64scatter_epi32<const SCALE: i32>(
/// Scatter 32-bit integers from a into memory using 64-bit indices. 32-bit elements are stored at addresses starting at base_addr and offset by each 64-bit element in vindex (each index is scaled by the factor in scale) subject to mask k (elements are not stored when the corresponding mask bit is not set). scale should be 1, 2, 4 or 8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_i64scatter_epi32&expand=3109)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_i64scatter_epi32&expand=3109)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpscatterqd, SCALE = 1))]
@@ -15882,7 +15882,7 @@ pub unsafe fn _mm512_mask_i64scatter_epi32<const SCALE: i32>(
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compress_epi32&expand=1198)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi32&expand=1198)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -15892,7 +15892,7 @@ pub unsafe fn _mm512_mask_compress_epi32(src: __m512i, k: __mmask16, a: __m512i)
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_compress_epi32&expand=1199)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi32&expand=1199)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -15906,7 +15906,7 @@ pub unsafe fn _mm512_maskz_compress_epi32(k: __mmask16, a: __m512i) -> __m512i {
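A sketch of the compress semantics (hypothetical helper; same nightly assumptions):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn compress_sketch() {
    let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    // Mask bits 1, 3, 5, ... are set, so the odd-indexed elements are
    // packed to the front; the maskz variant zeroes the remaining tail.
    let r = _mm512_maskz_compress_epi32(0b1010_1010_1010_1010, a);
    let out: [i32; 16] = core::mem::transmute(r);
    assert_eq!(out, [1, 3, 5, 7, 9, 11, 13, 15, 0, 0, 0, 0, 0, 0, 0, 0]);
}
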
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compress_epi32&expand=1196)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi32&expand=1196)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -15916,7 +15916,7 @@ pub unsafe fn _mm256_mask_compress_epi32(src: __m256i, k: __mmask8, a: __m256i)
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_compress_epi32&expand=1197)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi32&expand=1197)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -15930,7 +15930,7 @@ pub unsafe fn _mm256_maskz_compress_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compress_epi32&expand=1194)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi32&expand=1194)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -15940,7 +15940,7 @@ pub unsafe fn _mm_mask_compress_epi32(src: __m128i, k: __mmask8, a: __m128i) ->
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_compress_epi32&expand=1195)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi32&expand=1195)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -15954,7 +15954,7 @@ pub unsafe fn _mm_maskz_compress_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compress_epi64&expand=1204)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi64&expand=1204)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -15964,7 +15964,7 @@ pub unsafe fn _mm512_mask_compress_epi64(src: __m512i, k: __mmask8, a: __m512i)
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_compress_epi64&expand=1205)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi64&expand=1205)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -15978,7 +15978,7 @@ pub unsafe fn _mm512_maskz_compress_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compress_epi64&expand=1202)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi64&expand=1202)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -15988,7 +15988,7 @@ pub unsafe fn _mm256_mask_compress_epi64(src: __m256i, k: __mmask8, a: __m256i)
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_compress_epi64&expand=1203)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi64&expand=1203)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -16002,7 +16002,7 @@ pub unsafe fn _mm256_maskz_compress_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compress_epi64&expand=1200)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi64&expand=1200)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -16012,7 +16012,7 @@ pub unsafe fn _mm_mask_compress_epi64(src: __m128i, k: __mmask8, a: __m128i) ->
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_compress_epi64&expand=1201)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi64&expand=1201)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -16026,7 +16026,7 @@ pub unsafe fn _mm_maskz_compress_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compress_ps&expand=1222)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_ps&expand=1222)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16036,7 +16036,7 @@ pub unsafe fn _mm512_mask_compress_ps(src: __m512, k: __mmask16, a: __m512) -> _
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_compress_ps&expand=1223)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_ps&expand=1223)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16050,7 +16050,7 @@ pub unsafe fn _mm512_maskz_compress_ps(k: __mmask16, a: __m512) -> __m512 {
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compress_ps&expand=1220)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_ps&expand=1220)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16060,7 +16060,7 @@ pub unsafe fn _mm256_mask_compress_ps(src: __m256, k: __mmask8, a: __m256) -> __
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_compress_ps&expand=1221)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_ps&expand=1221)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16074,7 +16074,7 @@ pub unsafe fn _mm256_maskz_compress_ps(k: __mmask8, a: __m256) -> __m256 {
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compress_ps&expand=1218)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_ps&expand=1218)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16084,7 +16084,7 @@ pub unsafe fn _mm_mask_compress_ps(src: __m128, k: __mmask8, a: __m128) -> __m12
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_compress_ps&expand=1219)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_ps&expand=1219)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16094,7 +16094,7 @@ pub unsafe fn _mm_maskz_compress_ps(k: __mmask8, a: __m128) -> __m128 {
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compress_pd&expand=1216)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_pd&expand=1216)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16104,7 +16104,7 @@ pub unsafe fn _mm512_mask_compress_pd(src: __m512d, k: __mmask8, a: __m512d) ->
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_compress_pd&expand=1217)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_pd&expand=1217)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16114,7 +16114,7 @@ pub unsafe fn _mm512_maskz_compress_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compress_pd&expand=1214)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_pd&expand=1214)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16124,7 +16124,7 @@ pub unsafe fn _mm256_mask_compress_pd(src: __m256d, k: __mmask8, a: __m256d) ->
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_compress_pd&expand=1215)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_pd&expand=1215)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16138,7 +16138,7 @@ pub unsafe fn _mm256_maskz_compress_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compress_pd&expand=1212)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_pd&expand=1212)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16148,7 +16148,7 @@ pub unsafe fn _mm_mask_compress_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_compress_pd&expand=1213)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_pd&expand=1213)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16158,7 +16158,7 @@ pub unsafe fn _mm_maskz_compress_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compressstoreu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -16168,7 +16168,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16,
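The compress-store form is the building block of a SIMD stream filter; a hypothetical sketch (same nightly assumptions, and note set_len relies on exactly popcount(keep) elements being written):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn append_selected_sketch(out: &mut Vec<i32>, v: __m512i, keep: __mmask16) {
    // Exactly keep.count_ones() selected lanes are written, contiguously
    // and unaligned, starting at the end of the vector's initialized data.
    out.reserve(16);
    let end = out.as_mut_ptr().add(out.len());
    _mm512_mask_compressstoreu_epi32(end as *mut u8, keep, v);
    out.set_len(out.len() + keep.count_ones() as usize);
}
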
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compressstoreu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -16178,7 +16178,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8,
/// Contiguously store the active 32-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compressstoreu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressd))]
@@ -16188,7 +16188,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a:
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compressstoreu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -16198,7 +16198,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8,
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compressstoreu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -16208,7 +16208,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8,
/// Contiguously store the active 64-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compressstoreu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressq))]
@@ -16218,7 +16218,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a:
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compressstoreu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16228,7 +16228,7 @@ pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a:
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compressstoreu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16238,7 +16238,7 @@ pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a:
/// Contiguously store the active single-precision (32-bit) floating-point elements in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compressstoreu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompressps))]
@@ -16248,7 +16248,7 @@ pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compressstoreu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16258,7 +16258,7 @@ pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a:
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compressstoreu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompresspd))]
@@ -16268,7 +16268,7 @@ pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a:
/// Contiguously store the active double-precision (64-bit) floating-point elements in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compressstoreu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vcompresspd))]
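
Reviewer note, not part of the patch: the compress-store family above writes only the mask-selected lanes, packed contiguously, to an unaligned address. A minimal usage sketch, assuming this revision's raw-byte-pointer signature (`base_addr: *mut u8`), an AVX512F CPU verified beforehand with `is_x86_feature_detected!("avx512f")`, and a toolchain where these intrinsics are available (nightly `stdarch_x86_avx512` at the time of this change):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn compress_demo() {
    let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let k: __mmask8 = 0b1010_1010; // lanes 1, 3, 5, 7 are active
    let mut out = [0i64; 8];
    // Destination is a raw byte pointer in this revision of the API.
    _mm512_mask_compressstoreu_epi64(out.as_mut_ptr() as *mut u8, k, a);
    assert_eq!(out[..4], [1, 3, 5, 7]); // actives packed at the front
    assert_eq!(out[4..], [0, 0, 0, 0]); // trailing elements untouched
}
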
@@ -16278,7 +16278,7 @@ pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m
/// Load contiguous active 32-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expand_epi32&expand=2316)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi32&expand=2316)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpexpandd))]
@@ -16288,7 +16288,7 @@ pub unsafe fn _mm512_mask_expand_epi32(src: __m512i, k: __mmask16, a: __m512i) -
/// Load contiguous active 32-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expand_epi32&expand=2317)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi32&expand=2317)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpexpandd))]
@@ -16302,7 +16302,7 @@ pub unsafe fn _mm512_maskz_expand_epi32(k: __mmask16, a: __m512i) -> __m512i {
/// Load contiguous active 32-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expand_epi32&expand=2314)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi32&expand=2314)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandd))]
@@ -16312,7 +16312,7 @@ pub unsafe fn _mm256_mask_expand_epi32(src: __m256i, k: __mmask8, a: __m256i) ->
/// Load contiguous active 32-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expand_epi32&expand=2315)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi32&expand=2315)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandd))]
@@ -16326,7 +16326,7 @@ pub unsafe fn _mm256_maskz_expand_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Load contiguous active 32-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expand_epi32&expand=2312)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi32&expand=2312)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandd))]
@@ -16336,7 +16336,7 @@ pub unsafe fn _mm_mask_expand_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __
/// Load contiguous active 32-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expand_epi32&expand=2313)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi32&expand=2313)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandd))]
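
The expand intrinsics are the register-to-register inverse of the compress family: they pull elements from the low end of a, in order, into the active lanes. A sketch of the writemask form, under the same feature assumptions as the sketch above:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn expand_demo() {
    let src = _mm512_set1_epi32(-1);
    let a = _mm512_setr_epi32(10, 20, 30, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    let k: __mmask16 = 0b0000_0000_0010_0101; // lanes 0, 2, 5 are active
    let r = _mm512_mask_expand_epi32(src, k, a);
    // Lane 0 <- a[0] = 10, lane 2 <- a[1] = 20, lane 5 <- a[2] = 30;
    // every inactive lane is copied from src.
    let expected = _mm512_setr_epi32(10, -1, 20, -1, -1, 30, -1, -1,
                                     -1, -1, -1, -1, -1, -1, -1, -1);
    assert_eq!(_mm512_cmpeq_epi32_mask(r, expected), 0xFFFF);
}
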
@@ -16350,7 +16350,7 @@ pub unsafe fn _mm_maskz_expand_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Load contiguous active 64-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expand_epi64&expand=2322)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi64&expand=2322)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpexpandq))]
@@ -16360,7 +16360,7 @@ pub unsafe fn _mm512_mask_expand_epi64(src: __m512i, k: __mmask8, a: __m512i) ->
/// Load contiguous active 64-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expand_epi64&expand=2323)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi64&expand=2323)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpexpandq))]
@@ -16374,7 +16374,7 @@ pub unsafe fn _mm512_maskz_expand_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Load contiguous active 64-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expand_epi64&expand=2320)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi64&expand=2320)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandq))]
@@ -16384,7 +16384,7 @@ pub unsafe fn _mm256_mask_expand_epi64(src: __m256i, k: __mmask8, a: __m256i) ->
/// Load contiguous active 64-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expand_epi64&expand=2321)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi64&expand=2321)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandq))]
@@ -16398,7 +16398,7 @@ pub unsafe fn _mm256_maskz_expand_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Load contiguous active 64-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expand_epi64&expand=2318)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi64&expand=2318)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandq))]
@@ -16408,7 +16408,7 @@ pub unsafe fn _mm_mask_expand_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __
/// Load contiguous active 64-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expand_epi64&expand=2319)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi64&expand=2319)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandq))]
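
The maskz_ variants differ only in zeroing the inactive lanes instead of copying them from src. A sketch using the 256-bit form, which additionally assumes AVX512VL:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn expand_zero_demo() {
    let a = _mm256_setr_epi64x(7, 8, 9, 10);
    let k: __mmask8 = 0b0000_1100; // lanes 2 and 3 are active
    let r = _mm256_maskz_expand_epi64(k, a);
    // Lane 2 <- a[0] = 7, lane 3 <- a[1] = 8; inactive lanes are zeroed.
    let expected = _mm256_setr_epi64x(0, 0, 7, 8);
    assert_eq!(_mm256_cmpeq_epi64_mask(r, expected), 0x0F);
}
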
@@ -16422,7 +16422,7 @@ pub unsafe fn _mm_maskz_expand_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Load contiguous active single-precision (32-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expand_ps&expand=2340)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_ps&expand=2340)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vexpandps))]
@@ -16432,7 +16432,7 @@ pub unsafe fn _mm512_mask_expand_ps(src: __m512, k: __mmask16, a: __m512) -> __m
/// Load contiguous active single-precision (32-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expand_ps&expand=2341)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_ps&expand=2341)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vexpandps))]
@@ -16442,7 +16442,7 @@ pub unsafe fn _mm512_maskz_expand_ps(k: __mmask16, a: __m512) -> __m512 {
/// Load contiguous active single-precision (32-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expand_ps&expand=2338)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_ps&expand=2338)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandps))]
@@ -16452,7 +16452,7 @@ pub unsafe fn _mm256_mask_expand_ps(src: __m256, k: __mmask8, a: __m256) -> __m2
/// Load contiguous active single-precision (32-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expand_ps&expand=2339)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_ps&expand=2339)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandps))]
@@ -16466,7 +16466,7 @@ pub unsafe fn _mm256_maskz_expand_ps(k: __mmask8, a: __m256) -> __m256 {
/// Load contiguous active single-precision (32-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expand_ps&expand=2336)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_ps&expand=2336)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandps))]
@@ -16476,7 +16476,7 @@ pub unsafe fn _mm_mask_expand_ps(src: __m128, k: __mmask8, a: __m128) -> __m128
/// Load contiguous active single-precision (32-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expand_ps&expand=2337)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_ps&expand=2337)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandps))]
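
The floating-point expands behave identically per lane. A sketch for the 128-bit ps form (AVX512F plus VL assumed), using NaN as a sentinel to make the "copied from src" lanes visible:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn expand_ps_demo() {
    let src = _mm_set1_ps(f32::NAN); // sentinel marking "copied from src"
    let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
    let k: __mmask8 = 0b0000_0110; // lanes 1 and 2 are active
    let r = _mm_mask_expand_ps(src, k, a);
    let mut out = [0.0f32; 4];
    _mm_storeu_ps(out.as_mut_ptr(), r);
    assert!(out[0].is_nan() && out[3].is_nan()); // inactive lanes kept src
    assert_eq!(out[1..3], [1.0, 2.0]); // a's first two elements, in order
}
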
@@ -16486,7 +16486,7 @@ pub unsafe fn _mm_maskz_expand_ps(k: __mmask8, a: __m128) -> __m128 {
/// Load contiguous active double-precision (64-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expand_pd&expand=2334)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_pd&expand=2334)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vexpandpd))]
@@ -16496,7 +16496,7 @@ pub unsafe fn _mm512_mask_expand_pd(src: __m512d, k: __mmask8, a: __m512d) -> __
/// Load contiguous active double-precision (64-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expand_pd&expand=2335)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_pd&expand=2335)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vexpandpd))]
@@ -16506,7 +16506,7 @@ pub unsafe fn _mm512_maskz_expand_pd(k: __mmask8, a: __m512d) -> __m512d {
/// Load contiguous active double-precision (64-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expand_pd&expand=2332)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_pd&expand=2332)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandpd))]
@@ -16516,7 +16516,7 @@ pub unsafe fn _mm256_mask_expand_pd(src: __m256d, k: __mmask8, a: __m256d) -> __
/// Load contiguous active double-precision (64-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expand_pd&expand=2333)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_pd&expand=2333)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandpd))]
@@ -16530,7 +16530,7 @@ pub unsafe fn _mm256_maskz_expand_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Load contiguous active double-precision (64-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expand_pd&expand=2330)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_pd&expand=2330)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandpd))]
@@ -16540,7 +16540,7 @@ pub unsafe fn _mm_mask_expand_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m12
/// Load contiguous active double-precision (64-bit) floating-point elements from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expand_pd&expand=2331)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_pd&expand=2331)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vexpandpd))]
@@ -16550,13 +16550,13 @@ pub unsafe fn _mm_maskz_expand_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rol_epi32&expand=4685)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rol_epi32&expand=4685)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_rol_epi32<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vprold(a, IMM8);
transmute(r)
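
Reviewer note on the substantive change in the hunks below: static_assert_uimm_bits!(IMM8, 8) replaces the old static_assert_imm8! and rejects, at compile time, any immediate that does not fit in 8 unsigned bits. A minimal sketch of the idea using an inline const block; the real stdarch macro predates inline const and is implemented differently:

fn rotate_checked<const IMM8: i32>(x: u32) -> u32 {
    // Post-monomorphization const-eval failure if IMM8 is out of range,
    // mirroring what the stdarch assert enforces for the intrinsics here.
    const { assert!(IMM8 >= 0 && IMM8 < (1 << 8), "IMM8 must fit in 8 bits") };
    x.rotate_left(IMM8 as u32 % 32)
}
// rotate_checked::<4>(1) == 16; rotate_checked::<256>(1) fails to compile.
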
@@ -16564,7 +16564,7 @@ pub unsafe fn _mm512_rol_epi32<const IMM8: i32>(a: __m512i) -> __m512i {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rol_epi32&expand=4683)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rol_epi32&expand=4683)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
@@ -16574,7 +16574,7 @@ pub unsafe fn _mm512_mask_rol_epi32<const IMM8: i32>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vprold(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x16()))
@@ -16582,13 +16582,13 @@ pub unsafe fn _mm512_mask_rol_epi32<const IMM8: i32>(
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rol_epi32&expand=4684)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rol_epi32&expand=4684)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_rol_epi32<const IMM8: i32>(k: __mmask16, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vprold(a, IMM8);
let zero = _mm512_setzero_si512().as_i32x16();
@@ -16597,13 +16597,13 @@ pub unsafe fn _mm512_maskz_rol_epi32<const IMM8: i32>(k: __mmask16, a: __m512i)
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rol_epi32&expand=4682)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rol_epi32&expand=4682)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_rol_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let r = vprold256(a, IMM8);
transmute(r)
@@ -16611,7 +16611,7 @@ pub unsafe fn _mm256_rol_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rol_epi32&expand=4680)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rol_epi32&expand=4680)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
@@ -16621,7 +16621,7 @@ pub unsafe fn _mm256_mask_rol_epi32<const IMM8: i32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let r = vprold256(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x8()))
@@ -16629,13 +16629,13 @@ pub unsafe fn _mm256_mask_rol_epi32<const IMM8: i32>(
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rol_epi32&expand=4681)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rol_epi32&expand=4681)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_rol_epi32<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let r = vprold256(a, IMM8);
let zero = _mm256_setzero_si256().as_i32x8();
@@ -16644,13 +16644,13 @@ pub unsafe fn _mm256_maskz_rol_epi32<const IMM8: i32>(k: __mmask8, a: __m256i) -
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rol_epi32&expand=4679)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rol_epi32&expand=4679)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_rol_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let r = vprold128(a, IMM8);
transmute(r)
@@ -16658,7 +16658,7 @@ pub unsafe fn _mm_rol_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rol_epi32&expand=4677)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rol_epi32&expand=4677)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
@@ -16668,7 +16668,7 @@ pub unsafe fn _mm_mask_rol_epi32<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let r = vprold128(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x4()))
@@ -16676,13 +16676,13 @@ pub unsafe fn _mm_mask_rol_epi32<const IMM8: i32>(
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rol_epi32&expand=4678)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rol_epi32&expand=4678)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_rol_epi32<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let r = vprold128(a, IMM8);
let zero = _mm_setzero_si128().as_i32x4();
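
Usage sketch for the plain rotate, under the same feature assumptions as the earlier sketches; the count is the const generic checked by the assert just swapped in:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn rol_demo() {
    let a = _mm512_set1_epi32(0x8000_0000u32 as i32);
    // Turbofish form; _mm512_rol_epi32(a, 1) also works via
    // #[rustc_legacy_const_generics].
    let r = _mm512_rol_epi32::<1>(a);
    assert_eq!(_mm512_cmpeq_epi32_mask(r, _mm512_set1_epi32(1)), 0xFFFF);
}
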
@@ -16691,13 +16691,13 @@ pub unsafe fn _mm_maskz_rol_epi32<const IMM8: i32>(k: __mmask8, a: __m128i) -> _
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_ror_epi32&expand=4721)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ror_epi32&expand=4721)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_ror_epi32<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vprord(a, IMM8);
transmute(r)
@@ -16705,7 +16705,7 @@ pub unsafe fn _mm512_ror_epi32<const IMM8: i32>(a: __m512i) -> __m512i {
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_ror_epi32&expand=4719)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ror_epi32&expand=4719)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 123))]
@@ -16715,7 +16715,7 @@ pub unsafe fn _mm512_mask_ror_epi32<const IMM8: i32>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vprord(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x16()))
@@ -16723,13 +16723,13 @@ pub unsafe fn _mm512_mask_ror_epi32<const IMM8: i32>(
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_ror_epi32&expand=4720)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ror_epi32&expand=4720)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 123))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_ror_epi32<const IMM8: i32>(k: __mmask16, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vprord(a, IMM8);
let zero = _mm512_setzero_si512().as_i32x16();
@@ -16738,13 +16738,13 @@ pub unsafe fn _mm512_maskz_ror_epi32<const IMM8: i32>(k: __mmask16, a: __m512i)
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ror_epi32&expand=4718)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ror_epi32&expand=4718)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_ror_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let r = vprord256(a, IMM8);
transmute(r)
@@ -16752,7 +16752,7 @@ pub unsafe fn _mm256_ror_epi32<const IMM8: i32>(a: __m256i) -> __m256i {
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_ror_epi32&expand=4716)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ror_epi32&expand=4716)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 123))]
@@ -16762,7 +16762,7 @@ pub unsafe fn _mm256_mask_ror_epi32<const IMM8: i32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let r = vprord256(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x8()))
@@ -16770,13 +16770,13 @@ pub unsafe fn _mm256_mask_ror_epi32<const IMM8: i32>(
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_ror_epi32&expand=4717)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ror_epi32&expand=4717)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 123))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_ror_epi32<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let r = vprord256(a, IMM8);
let zero = _mm256_setzero_si256().as_i32x8();
@@ -16785,13 +16785,13 @@ pub unsafe fn _mm256_maskz_ror_epi32<const IMM8: i32>(k: __mmask8, a: __m256i) -
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ror_epi32&expand=4715)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ror_epi32&expand=4715)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_ror_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let r = vprord128(a, IMM8);
transmute(r)
@@ -16799,7 +16799,7 @@ pub unsafe fn _mm_ror_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_ror_epi32&expand=4713)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ror_epi32&expand=4713)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 123))]
@@ -16809,7 +16809,7 @@ pub unsafe fn _mm_mask_ror_epi32<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let r = vprord128(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x4()))
@@ -16817,13 +16817,13 @@ pub unsafe fn _mm_mask_ror_epi32<const IMM8: i32>(
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_ror_epi32&expand=4714)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ror_epi32&expand=4714)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprold, IMM8 = 123))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_ror_epi32<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let r = vprord128(a, IMM8);
let zero = _mm_setzero_si128().as_i32x4();
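
For the mask_ rotates, inactive lanes pass src through unchanged. A sketch, same assumptions as above:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn mask_ror_demo() {
    let src = _mm512_set1_epi32(-1);
    let a = _mm512_set1_epi32(2);
    let k: __mmask16 = 0x00FF; // rotate only the low eight lanes
    let r = _mm512_mask_ror_epi32::<1>(src, k, a);
    // Active lanes hold 2 ror 1 == 1; inactive lanes keep src's -1,
    // so exactly the low eight lanes compare equal to 1.
    assert_eq!(_mm512_cmpeq_epi32_mask(r, _mm512_set1_epi32(1)), 0x00FF);
}
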
@@ -16832,13 +16832,13 @@ pub unsafe fn _mm_maskz_ror_epi32<const IMM8: i32>(k: __mmask8, a: __m128i) -> _
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rol_epi64&expand=4694)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rol_epi64&expand=4694)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_rol_epi64<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vprolq(a, IMM8);
transmute(r)
@@ -16846,7 +16846,7 @@ pub unsafe fn _mm512_rol_epi64<const IMM8: i32>(a: __m512i) -> __m512i {
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rol_epi64&expand=4692)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rol_epi64&expand=4692)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
@@ -16856,7 +16856,7 @@ pub unsafe fn _mm512_mask_rol_epi64<const IMM8: i32>(
k: __mmask8,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vprolq(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i64x8()))
@@ -16864,13 +16864,13 @@ pub unsafe fn _mm512_mask_rol_epi64<const IMM8: i32>(
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rol_epi64&expand=4693)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rol_epi64&expand=4693)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_rol_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vprolq(a, IMM8);
let zero = _mm512_setzero_si512().as_i64x8();
@@ -16879,13 +16879,13 @@ pub unsafe fn _mm512_maskz_rol_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rol_epi64&expand=4691)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rol_epi64&expand=4691)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_rol_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vprolq256(a, IMM8);
transmute(r)
@@ -16893,7 +16893,7 @@ pub unsafe fn _mm256_rol_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rol_epi64&expand=4689)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rol_epi64&expand=4689)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
@@ -16903,7 +16903,7 @@ pub unsafe fn _mm256_mask_rol_epi64<const IMM8: i32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vprolq256(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i64x4()))
@@ -16911,13 +16911,13 @@ pub unsafe fn _mm256_mask_rol_epi64<const IMM8: i32>(
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rol_epi64&expand=4690)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rol_epi64&expand=4690)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_rol_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vprolq256(a, IMM8);
let zero = _mm256_setzero_si256().as_i64x4();
@@ -16926,13 +16926,13 @@ pub unsafe fn _mm256_maskz_rol_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rol_epi64&expand=4688)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rol_epi64&expand=4688)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_rol_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vprolq128(a, IMM8);
transmute(r)
@@ -16940,7 +16940,7 @@ pub unsafe fn _mm_rol_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rol_epi64&expand=4686)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rol_epi64&expand=4686)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
@@ -16950,7 +16950,7 @@ pub unsafe fn _mm_mask_rol_epi64<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vprolq128(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i64x2()))
@@ -16958,13 +16958,13 @@ pub unsafe fn _mm_mask_rol_epi64<const IMM8: i32>(
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rol_epi64&expand=4687)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rol_epi64&expand=4687)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_rol_epi64<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vprolq128(a, IMM8);
let zero = _mm_setzero_si128().as_i64x2();
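
The 128-bit VL variants follow the same pattern at narrower width. A sketch of a zero-masked 64-bit rotate (AVX512F plus VL assumed):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn maskz_rol128_demo() {
    let a = _mm_set1_epi64x(1);
    let k: __mmask8 = 0b0000_0001; // only lane 0 is active
    let r = _mm_maskz_rol_epi64::<4>(k, a); // lane 0: 1 rol 4 == 16; lane 1 zeroed
    let expected = _mm_set_epi64x(0, 16); // note: _mm_set_epi64x takes (e1, e0)
    assert_eq!(_mm_cmpeq_epi64_mask(r, expected), 0b11);
}
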
@@ -16973,13 +16973,13 @@ pub unsafe fn _mm_maskz_rol_epi64<const IMM8: i32>(k: __mmask8, a: __m128i) -> _
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_ror_epi64&expand=4730)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_ror_epi64&expand=4730)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_ror_epi64<const IMM8: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vprorq(a, IMM8);
transmute(r)
@@ -16987,7 +16987,7 @@ pub unsafe fn _mm512_ror_epi64<const IMM8: i32>(a: __m512i) -> __m512i {
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_ror_epi64&expand=4728)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_ror_epi64&expand=4728)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
@@ -16997,7 +16997,7 @@ pub unsafe fn _mm512_mask_ror_epi64<const IMM8: i32>(
k: __mmask8,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vprorq(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i64x8()))
@@ -17005,13 +17005,13 @@ pub unsafe fn _mm512_mask_ror_epi64<const IMM8: i32>(
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_ror_epi64&expand=4729)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_ror_epi64&expand=4729)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vprorq(a, IMM8);
let zero = _mm512_setzero_si512().as_i64x8();
@@ -17020,13 +17020,13 @@ pub unsafe fn _mm512_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m512i) -
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ror_epi64&expand=4727)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_ror_epi64&expand=4727)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_ror_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vprorq256(a, IMM8);
transmute(r)
@@ -17034,7 +17034,7 @@ pub unsafe fn _mm256_ror_epi64<const IMM8: i32>(a: __m256i) -> __m256i {
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_ror_epi64&expand=4725)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_ror_epi64&expand=4725)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
@@ -17044,7 +17044,7 @@ pub unsafe fn _mm256_mask_ror_epi64<const IMM8: i32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vprorq256(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i64x4()))
@@ -17052,13 +17052,13 @@ pub unsafe fn _mm256_mask_ror_epi64<const IMM8: i32>(
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_ror_epi64&expand=4726)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_ror_epi64&expand=4726)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vprorq256(a, IMM8);
let zero = _mm256_setzero_si256().as_i64x4();
@@ -17067,13 +17067,13 @@ pub unsafe fn _mm256_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m256i) -
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ror_epi64&expand=4724)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ror_epi64&expand=4724)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_ror_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vprorq128(a, IMM8);
transmute(r)
@@ -17081,7 +17081,7 @@ pub unsafe fn _mm_ror_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_ror_epi64&expand=4722)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_ror_epi64&expand=4722)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
@@ -17091,7 +17091,7 @@ pub unsafe fn _mm_mask_ror_epi64<const IMM8: i32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vprorq128(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i64x2()))
@@ -17099,13 +17099,13 @@ pub unsafe fn _mm_mask_ror_epi64<const IMM8: i32>(
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_ror_epi64&expand=4723)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_ror_epi64&expand=4723)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolq, IMM8 = 15))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vprorq128(a, IMM8);
let zero = _mm_setzero_si128().as_i64x2();
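
Rotating right by n is the same permutation as rotating left by the lane width minus n, which is presumably why the assert_instr annotations on the ror intrinsics above accept vprolq: the compiler may canonicalize right rotates into left rotates. A sketch of the identity:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn ror_rol_identity() {
    let a = _mm512_set1_epi64(0x0123_4567_89AB_CDEFu64 as i64);
    let r1 = _mm512_ror_epi64::<8>(a);
    let r2 = _mm512_rol_epi64::<56>(a);
    assert_eq!(_mm512_cmpeq_epi64_mask(r1, r2), 0xFF); // all eight lanes agree
}
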
@@ -17114,13 +17114,13 @@ pub unsafe fn _mm_maskz_ror_epi64<const IMM8: i32>(k: __mmask8, a: __m128i) -> _
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_slli_epi32&expand=5310)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_slli_epi32&expand=5310)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_slli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vpsllid(a, IMM8);
transmute(r)
@@ -17128,7 +17128,7 @@ pub unsafe fn _mm512_slli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_slli_epi32&expand=5308)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_slli_epi32&expand=5308)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
@@ -17138,7 +17138,7 @@ pub unsafe fn _mm512_mask_slli_epi32<const IMM8: u32>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let shf = vpsllid(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i32x16()))
@@ -17146,13 +17146,13 @@ pub unsafe fn _mm512_mask_slli_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_slli_epi32&expand=5309)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_slli_epi32&expand=5309)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_slli_epi32<const IMM8: u32>(k: __mmask16, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let shf = vpsllid(a, IMM8);
let zero = _mm512_setzero_si512().as_i32x16();
@@ -17161,7 +17161,7 @@ pub unsafe fn _mm512_maskz_slli_epi32<const IMM8: u32>(k: __mmask16, a: __m512i)
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_slli_epi32&expand=5305)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_slli_epi32&expand=5305)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
@@ -17171,7 +17171,7 @@ pub unsafe fn _mm256_mask_slli_epi32<const IMM8: u32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psllid256(a.as_i32x8(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i32x8()))
@@ -17179,13 +17179,13 @@ pub unsafe fn _mm256_mask_slli_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_slli_epi32&expand=5306)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_slli_epi32&expand=5306)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psllid256(a.as_i32x8(), imm8);
let zero = _mm256_setzero_si256().as_i32x8();
@@ -17194,7 +17194,7 @@ pub unsafe fn _mm256_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m256i)
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_slli_epi32&expand=5302)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_slli_epi32&expand=5302)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
@@ -17204,7 +17204,7 @@ pub unsafe fn _mm_mask_slli_epi32<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psllid128(a.as_i32x4(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i32x4()))
@@ -17212,13 +17212,13 @@ pub unsafe fn _mm_mask_slli_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_slli_epi32&expand=5303)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_slli_epi32&expand=5303)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psllid128(a.as_i32x4(), imm8);
let zero = _mm_setzero_si128().as_i32x4();
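Every `mask_`/`maskz_` pair in this file follows the contract the doc comments spell out per function: `mask_*` copies lanes from `src` where the corresponding mask bit is clear, while `maskz_*` zeroes them. A minimal usage sketch (illustrative only; the demo function is hypothetical, and a nightly toolchain with the then-unstable AVX-512 intrinsics plus an `avx512f`-capable CPU is assumed):

    #![feature(stdsimd)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn slli_demo() {
        let a = _mm512_set1_epi32(1);
        let k: __mmask16 = 0b0101_0101_0101_0101;
        // Zeromask: lanes whose mask bit is clear become 0.
        let z = _mm512_maskz_slli_epi32::<3>(k, a); // lanes: 8 where k is set, else 0
        // Writemask: lanes whose mask bit is clear are copied from `src` (here `a`).
        let m = _mm512_mask_slli_epi32::<3>(a, k, a); // lanes: 8 where k is set, else 1
        let _ = (z, m);
    }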
@@ -17227,13 +17227,13 @@ pub unsafe fn _mm_maskz_slli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srli_epi32&expand=5522)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srli_epi32&expand=5522)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vpsrlid(a, IMM8);
transmute(r)
@@ -17241,7 +17241,7 @@ pub unsafe fn _mm512_srli_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srli_epi32&expand=5520)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srli_epi32&expand=5520)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
@@ -17251,7 +17251,7 @@ pub unsafe fn _mm512_mask_srli_epi32<const IMM8: u32>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let shf = vpsrlid(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i32x16()))
@@ -17259,13 +17259,13 @@ pub unsafe fn _mm512_mask_srli_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srli_epi32&expand=5521)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srli_epi32&expand=5521)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srli_epi32<const IMM8: u32>(k: __mmask16, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let shf = vpsrlid(a, IMM8);
let zero = _mm512_setzero_si512().as_i32x16();
@@ -17274,7 +17274,7 @@ pub unsafe fn _mm512_maskz_srli_epi32<const IMM8: u32>(k: __mmask16, a: __m512i)
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srli_epi32&expand=5517)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srli_epi32&expand=5517)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
@@ -17284,7 +17284,7 @@ pub unsafe fn _mm256_mask_srli_epi32<const IMM8: u32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrlid256(a.as_i32x8(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i32x8()))
@@ -17292,13 +17292,13 @@ pub unsafe fn _mm256_mask_srli_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srli_epi32&expand=5518)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srli_epi32&expand=5518)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrlid256(a.as_i32x8(), imm8);
let zero = _mm256_setzero_si256().as_i32x8();
@@ -17307,7 +17307,7 @@ pub unsafe fn _mm256_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m256i)
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srli_epi32&expand=5514)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srli_epi32&expand=5514)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
@@ -17317,7 +17317,7 @@ pub unsafe fn _mm_mask_srli_epi32<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrlid128(a.as_i32x4(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i32x4()))
@@ -17325,13 +17325,13 @@ pub unsafe fn _mm_mask_srli_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srli_epi32&expand=5515)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srli_epi32&expand=5515)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrlid128(a.as_i32x4(), imm8);
let zero = _mm_setzero_si128().as_i32x4();
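`srli` is the logical counterpart to `slli`: vacated high bits are filled with zeros, so a negative lane does not stay negative (the `srai` family later in the patch fills with sign bits instead). Under the same assumptions as the sketch above:

    #[target_feature(enable = "avx512f")]
    unsafe fn srli_demo() -> __m512i {
        let a = _mm512_set1_epi32(-1);
        // Logical shift: 0xFFFF_FFFF >> 1 == 0x7FFF_FFFF in every lane;
        // the sign bit is not propagated.
        _mm512_srli_epi32::<1>(a)
    }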
@@ -17340,13 +17340,13 @@ pub unsafe fn _mm_maskz_srli_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_slli_epi64&expand=5319)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_slli_epi64&expand=5319)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_slli_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vpslliq(a, IMM8);
transmute(r)
@@ -17354,7 +17354,7 @@ pub unsafe fn _mm512_slli_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_slli_epi64&expand=5317)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_slli_epi64&expand=5317)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
@@ -17364,7 +17364,7 @@ pub unsafe fn _mm512_mask_slli_epi64<const IMM8: u32>(
k: __mmask8,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let shf = vpslliq(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i64x8()))
@@ -17372,13 +17372,13 @@ pub unsafe fn _mm512_mask_slli_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_slli_epi64&expand=5318)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_slli_epi64&expand=5318)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let shf = vpslliq(a, IMM8);
let zero = _mm512_setzero_si512().as_i64x8();
@@ -17387,7 +17387,7 @@ pub unsafe fn _mm512_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m512i)
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_slli_epi64&expand=5314)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_slli_epi64&expand=5314)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
@@ -17397,7 +17397,7 @@ pub unsafe fn _mm256_mask_slli_epi64<const IMM8: u32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliq256(a.as_i64x4(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i64x4()))
@@ -17405,13 +17405,13 @@ pub unsafe fn _mm256_mask_slli_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_slli_epi64&expand=5315)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_slli_epi64&expand=5315)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliq256(a.as_i64x4(), imm8);
let zero = _mm256_setzero_si256().as_i64x4();
@@ -17420,7 +17420,7 @@ pub unsafe fn _mm256_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m256i)
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_slli_epi64&expand=5311)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_slli_epi64&expand=5311)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
@@ -17430,7 +17430,7 @@ pub unsafe fn _mm_mask_slli_epi64<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliq128(a.as_i64x2(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i64x2()))
@@ -17438,13 +17438,13 @@ pub unsafe fn _mm_mask_slli_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_slli_epi64&expand=5312)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_slli_epi64&expand=5312)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = pslliq128(a.as_i64x2(), imm8);
let zero = _mm_setzero_si128().as_i64x2();
@@ -17453,13 +17453,13 @@ pub unsafe fn _mm_maskz_slli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srli_epi64&expand=5531)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srli_epi64&expand=5531)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srli_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vpsrliq(a, IMM8);
transmute(r)
@@ -17467,7 +17467,7 @@ pub unsafe fn _mm512_srli_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srli_epi64&expand=5529)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srli_epi64&expand=5529)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
@@ -17477,7 +17477,7 @@ pub unsafe fn _mm512_mask_srli_epi64<const IMM8: u32>(
k: __mmask8,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let shf = vpsrliq(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i64x8()))
@@ -17485,13 +17485,13 @@ pub unsafe fn _mm512_mask_srli_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srli_epi64&expand=5530)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srli_epi64&expand=5530)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let shf = vpsrliq(a, IMM8);
let zero = _mm512_setzero_si512().as_i64x8();
@@ -17500,7 +17500,7 @@ pub unsafe fn _mm512_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m512i)
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srli_epi64&expand=5526)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srli_epi64&expand=5526)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
@@ -17510,7 +17510,7 @@ pub unsafe fn _mm256_mask_srli_epi64<const IMM8: u32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrliq256(a.as_i64x4(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i64x4()))
@@ -17518,13 +17518,13 @@ pub unsafe fn _mm256_mask_srli_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srli_epi64&expand=5527)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srli_epi64&expand=5527)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrliq256(a.as_i64x4(), imm8);
let zero = _mm256_setzero_si256().as_i64x4();
@@ -17533,7 +17533,7 @@ pub unsafe fn _mm256_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m256i)
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srli_epi64&expand=5523)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srli_epi64&expand=5523)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
@@ -17543,7 +17543,7 @@ pub unsafe fn _mm_mask_srli_epi64<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrliq128(a.as_i64x2(), imm8);
transmute(simd_select_bitmask(k, r, src.as_i64x2()))
@@ -17551,13 +17551,13 @@ pub unsafe fn _mm_mask_srli_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srli_epi64&expand=5524)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srli_epi64&expand=5524)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i32;
let r = psrliq128(a.as_i64x2(), imm8);
let zero = _mm_setzero_si128().as_i64x2();
@@ -17566,7 +17566,7 @@ pub unsafe fn _mm_maskz_srli_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sll_epi32&expand=5280)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sll_epi32&expand=5280)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -17576,7 +17576,7 @@ pub unsafe fn _mm512_sll_epi32(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sll_epi32&expand=5278)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sll_epi32&expand=5278)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -17592,7 +17592,7 @@ pub unsafe fn _mm512_mask_sll_epi32(
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sll_epi32&expand=5279)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sll_epi32&expand=5279)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -17604,7 +17604,7 @@ pub unsafe fn _mm512_maskz_sll_epi32(k: __mmask16, a: __m512i, count: __m128i) -
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sll_epi32&expand=5275)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sll_epi32&expand=5275)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -17620,7 +17620,7 @@ pub unsafe fn _mm256_mask_sll_epi32(
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sll_epi32&expand=5276)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sll_epi32&expand=5276)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -17632,7 +17632,7 @@ pub unsafe fn _mm256_maskz_sll_epi32(k: __mmask8, a: __m256i, count: __m128i) ->
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sll_epi32&expand=5272)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sll_epi32&expand=5272)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld))]
@@ -17643,7 +17643,7 @@ pub unsafe fn _mm_mask_sll_epi32(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 32-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sll_epi32&expand=5273)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sll_epi32&expand=5273)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpslld))]
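Unlike the `slli`/`srli` immediates above, the `sll`/`srl` forms take the shift count at run time from the low 64 bits of a `__m128i` operand, and a count of 32 or more (64 for the `epi64` variants) zeroes every lane rather than wrapping. Sketch, same assumptions as above:

    #[target_feature(enable = "avx512f")]
    unsafe fn sll_demo() -> __m512i {
        let a = _mm512_set1_epi32(1);
        // The count is read from the low 64 bits of the second operand.
        let count = _mm_set_epi64x(0, 4);
        _mm512_sll_epi32(a, count) // every lane == 1 << 4
    }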
@@ -17655,7 +17655,7 @@ pub unsafe fn _mm_maskz_sll_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srl_epi32&expand=5492)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srl_epi32&expand=5492)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17665,7 +17665,7 @@ pub unsafe fn _mm512_srl_epi32(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srl_epi32&expand=5490)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srl_epi32&expand=5490)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17681,7 +17681,7 @@ pub unsafe fn _mm512_mask_srl_epi32(
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srl_epi32&expand=5491)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srl_epi32&expand=5491)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17693,7 +17693,7 @@ pub unsafe fn _mm512_maskz_srl_epi32(k: __mmask16, a: __m512i, count: __m128i) -
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srl_epi32&expand=5487)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srl_epi32&expand=5487)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17709,7 +17709,7 @@ pub unsafe fn _mm256_mask_srl_epi32(
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srl_epi32&expand=5488)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srl_epi32&expand=5488)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17721,7 +17721,7 @@ pub unsafe fn _mm256_maskz_srl_epi32(k: __mmask8, a: __m256i, count: __m128i) ->
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srl_epi32&expand=5484)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srl_epi32&expand=5484)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17732,7 +17732,7 @@ pub unsafe fn _mm_mask_srl_epi32(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 32-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srl_epi32&expand=5485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srl_epi32&expand=5485)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrld))]
@@ -17744,7 +17744,7 @@ pub unsafe fn _mm_maskz_srl_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sll_epi64&expand=5289)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sll_epi64&expand=5289)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17754,7 +17754,7 @@ pub unsafe fn _mm512_sll_epi64(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sll_epi64&expand=5287)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sll_epi64&expand=5287)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17770,7 +17770,7 @@ pub unsafe fn _mm512_mask_sll_epi64(
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sll_epi64&expand=5288)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sll_epi64&expand=5288)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17782,7 +17782,7 @@ pub unsafe fn _mm512_maskz_sll_epi64(k: __mmask8, a: __m512i, count: __m128i) ->
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sll_epi64&expand=5284)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sll_epi64&expand=5284)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17798,7 +17798,7 @@ pub unsafe fn _mm256_mask_sll_epi64(
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sll_epi64&expand=5285)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sll_epi64&expand=5285)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17810,7 +17810,7 @@ pub unsafe fn _mm256_maskz_sll_epi64(k: __mmask8, a: __m256i, count: __m128i) ->
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sll_epi64&expand=5281)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sll_epi64&expand=5281)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17821,7 +17821,7 @@ pub unsafe fn _mm_mask_sll_epi64(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 64-bit integers in a left by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sll_epi64&expand=5282)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sll_epi64&expand=5282)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllq))]
@@ -17833,7 +17833,7 @@ pub unsafe fn _mm_maskz_sll_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srl_epi64&expand=5501)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srl_epi64&expand=5501)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17843,7 +17843,7 @@ pub unsafe fn _mm512_srl_epi64(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srl_epi64&expand=5499)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srl_epi64&expand=5499)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17859,7 +17859,7 @@ pub unsafe fn _mm512_mask_srl_epi64(
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srl_epi64&expand=5500)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srl_epi64&expand=5500)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17871,7 +17871,7 @@ pub unsafe fn _mm512_maskz_srl_epi64(k: __mmask8, a: __m512i, count: __m128i) ->
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srl_epi64&expand=5496)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srl_epi64&expand=5496)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17887,7 +17887,7 @@ pub unsafe fn _mm256_mask_srl_epi64(
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srl_epi64&expand=5497)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srl_epi64&expand=5497)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17899,7 +17899,7 @@ pub unsafe fn _mm256_maskz_srl_epi64(k: __mmask8, a: __m256i, count: __m128i) ->
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srl_epi64&expand=5493)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srl_epi64&expand=5493)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17910,7 +17910,7 @@ pub unsafe fn _mm_mask_srl_epi64(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 64-bit integers in a right by count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srl_epi64&expand=5494)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srl_epi64&expand=5494)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlq))]
@@ -17922,7 +17922,7 @@ pub unsafe fn _mm_maskz_srl_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sra_epi32&expand=5407)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sra_epi32&expand=5407)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -17932,7 +17932,7 @@ pub unsafe fn _mm512_sra_epi32(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sra_epi32&expand=5405)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sra_epi32&expand=5405)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -17948,7 +17948,7 @@ pub unsafe fn _mm512_mask_sra_epi32(
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sra_epi32&expand=5406)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sra_epi32&expand=5406)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -17960,7 +17960,7 @@ pub unsafe fn _mm512_maskz_sra_epi32(k: __mmask16, a: __m512i, count: __m128i) -
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sra_epi32&expand=5402)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sra_epi32&expand=5402)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -17976,7 +17976,7 @@ pub unsafe fn _mm256_mask_sra_epi32(
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sra_epi32&expand=5403)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sra_epi32&expand=5403)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -17988,7 +17988,7 @@ pub unsafe fn _mm256_maskz_sra_epi32(k: __mmask8, a: __m256i, count: __m128i) ->
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sra_epi32&expand=5399)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sra_epi32&expand=5399)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad))]
@@ -17999,7 +17999,7 @@ pub unsafe fn _mm_mask_sra_epi32(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 32-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sra_epi32&expand=5400)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sra_epi32&expand=5400)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad))]
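`sra` is the arithmetic variant: vacated bits are copies of the sign bit, so negative lanes stay negative. Sketch, same assumptions as above:

    #[target_feature(enable = "avx512f")]
    unsafe fn sra_demo() -> __m512i {
        let a = _mm512_set1_epi32(-8);
        let count = _mm_set_epi64x(0, 2);
        _mm512_sra_epi32(a, count) // arithmetic shift: every lane == -2
    }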
@@ -18011,7 +18011,7 @@ pub unsafe fn _mm_maskz_sra_epi32(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sra_epi64&expand=5416)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sra_epi64&expand=5416)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18021,7 +18021,7 @@ pub unsafe fn _mm512_sra_epi64(a: __m512i, count: __m128i) -> __m512i {
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sra_epi64&expand=5414)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sra_epi64&expand=5414)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18037,7 +18037,7 @@ pub unsafe fn _mm512_mask_sra_epi64(
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sra_epi64&expand=5415)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sra_epi64&expand=5415)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18049,7 +18049,7 @@ pub unsafe fn _mm512_maskz_sra_epi64(k: __mmask8, a: __m512i, count: __m128i) ->
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sra_epi64&expand=5413)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_sra_epi64&expand=5413)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18059,7 +18059,7 @@ pub unsafe fn _mm256_sra_epi64(a: __m256i, count: __m128i) -> __m256i {
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sra_epi64&expand=5411)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sra_epi64&expand=5411)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18075,7 +18075,7 @@ pub unsafe fn _mm256_mask_sra_epi64(
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sra_epi64&expand=5412)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sra_epi64&expand=5412)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18087,7 +18087,7 @@ pub unsafe fn _mm256_maskz_sra_epi64(k: __mmask8, a: __m256i, count: __m128i) ->
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sra_epi64&expand=5410)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi64&expand=5410)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18097,7 +18097,7 @@ pub unsafe fn _mm_sra_epi64(a: __m128i, count: __m128i) -> __m128i {
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sra_epi64&expand=5408)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sra_epi64&expand=5408)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq))]
@@ -18108,7 +18108,7 @@ pub unsafe fn _mm_mask_sra_epi64(src: __m128i, k: __mmask8, a: __m128i, count: _
/// Shift packed 64-bit integers in a right by count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sra_epi64&expand=5409)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sra_epi64&expand=5409)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq))]
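One detail worth noting: 64-bit arithmetic shifts (`vpsraq`) have no SSE2/AVX2 ancestor, which is why even the unmasked `_mm256_sra_epi64` and `_mm_sra_epi64` live in this file and require `avx512f,avx512vl`. Sketch, same assumptions as above plus an `avx512vl`-capable CPU:

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn sra64_demo() -> __m128i {
        let a = _mm_set1_epi64x(-16);
        let count = _mm_set_epi64x(0, 3);
        _mm_sra_epi64(a, count) // both lanes == -2
    }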
@@ -18120,13 +18120,13 @@ pub unsafe fn _mm_maskz_sra_epi64(k: __mmask8, a: __m128i, count: __m128i) -> __
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srai_epi32&expand=5436)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srai_epi32&expand=5436)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srai_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vpsraid512(a, IMM8);
transmute(r)
@@ -18134,7 +18134,7 @@ pub unsafe fn _mm512_srai_epi32<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srai_epi32&expand=5434)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srai_epi32&expand=5434)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
@@ -18144,7 +18144,7 @@ pub unsafe fn _mm512_mask_srai_epi32<const IMM8: u32>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vpsraid512(a, IMM8);
transmute(simd_select_bitmask(k, r, src.as_i32x16()))
@@ -18152,13 +18152,13 @@ pub unsafe fn _mm512_mask_srai_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srai_epi32&expand=5435)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srai_epi32&expand=5435)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srai_epi32<const IMM8: u32>(k: __mmask16, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let r = vpsraid512(a, IMM8);
let zero = _mm512_setzero_si512().as_i32x16();
@@ -18167,7 +18167,7 @@ pub unsafe fn _mm512_maskz_srai_epi32<const IMM8: u32>(k: __mmask16, a: __m512i)
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srai_epi32&expand=5431)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srai_epi32&expand=5431)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
@@ -18184,7 +18184,7 @@ pub unsafe fn _mm256_mask_srai_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srai_epi32&expand=5432)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srai_epi32&expand=5432)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
@@ -18198,7 +18198,7 @@ pub unsafe fn _mm256_maskz_srai_epi32<const IMM8: u32>(k: __mmask8, a: __m256i)
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srai_epi32&expand=5428)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srai_epi32&expand=5428)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
@@ -18215,7 +18215,7 @@ pub unsafe fn _mm_mask_srai_epi32<const IMM8: u32>(
/// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srai_epi32&expand=5429)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srai_epi32&expand=5429)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrad, IMM8 = 1))]
@@ -18229,13 +18229,13 @@ pub unsafe fn _mm_maskz_srai_epi32<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srai_epi64&expand=5445)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srai_epi64&expand=5445)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_srai_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let r = vpsraiq(a, IMM8);
transmute(r)
@@ -18243,7 +18243,7 @@ pub unsafe fn _mm512_srai_epi64<const IMM8: u32>(a: __m512i) -> __m512i {
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srai_epi64&expand=5443)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srai_epi64&expand=5443)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
@@ -18253,7 +18253,7 @@ pub unsafe fn _mm512_mask_srai_epi64<const IMM8: u32>(
k: __mmask8,
a: __m512i,
) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let shf = vpsraiq(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i64x8()))
@@ -18261,13 +18261,13 @@ pub unsafe fn _mm512_mask_srai_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srai_epi64&expand=5444)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srai_epi64&expand=5444)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m512i) -> __m512i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x8();
let shf = vpsraiq(a, IMM8);
let zero = _mm512_setzero_si512().as_i64x8();
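
The zeromask variants differ only in the fallback: lanes with a clear mask bit are zeroed rather than copied from `src`. A matching sketch under the same assumptions (`maskz_select` is illustrative):

// Lane i takes the shifted value when bit i of k is set, else 0.
fn maskz_select(k: u8, shifted: [i64; 8]) -> [i64; 8] {
    let mut out = [0i64; 8];
    for i in 0..8 {
        if (k >> i) & 1 == 1 {
            out[i] = shifted[i];
        }
    }
    out
}
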
@@ -18276,13 +18276,13 @@ pub unsafe fn _mm512_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m512i)
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srai_epi64&expand=5442)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srai_epi64&expand=5442)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_srai_epi64<const IMM8: u32>(a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let r = vpsraiq256(a, IMM8);
transmute(r)
@@ -18290,7 +18290,7 @@ pub unsafe fn _mm256_srai_epi64<const IMM8: u32>(a: __m256i) -> __m256i {
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srai_epi64&expand=5440)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srai_epi64&expand=5440)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
@@ -18300,7 +18300,7 @@ pub unsafe fn _mm256_mask_srai_epi64<const IMM8: u32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let shf = vpsraiq256(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i64x4()))
@@ -18308,13 +18308,13 @@ pub unsafe fn _mm256_mask_srai_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srai_epi64&expand=5441)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srai_epi64&expand=5441)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x4();
let shf = vpsraiq256(a, IMM8);
let zero = _mm256_setzero_si256().as_i64x4();
@@ -18323,13 +18323,13 @@ pub unsafe fn _mm256_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m256i)
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi64&expand=5439)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi64&expand=5439)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm_srai_epi64<const IMM8: u32>(a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let r = vpsraiq128(a, IMM8);
transmute(r)
@@ -18337,7 +18337,7 @@ pub unsafe fn _mm_srai_epi64<const IMM8: u32>(a: __m128i) -> __m128i {
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srai_epi64&expand=5437)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srai_epi64&expand=5437)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
@@ -18347,7 +18347,7 @@ pub unsafe fn _mm_mask_srai_epi64<const IMM8: u32>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let shf = vpsraiq128(a, IMM8);
transmute(simd_select_bitmask(k, shf, src.as_i64x2()))
@@ -18355,13 +18355,13 @@ pub unsafe fn _mm_mask_srai_epi64<const IMM8: u32>(
/// Shift packed 64-bit integers in a right by imm8 while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srai_epi64&expand=5438)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srai_epi64&expand=5438)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsraq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) -> __m128i {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i64x2();
let shf = vpsraiq128(a, IMM8);
let zero = _mm_setzero_si128().as_i64x2();
@@ -18370,7 +18370,7 @@ pub unsafe fn _mm_maskz_srai_epi64<const IMM8: u32>(k: __mmask8, a: __m128i) ->
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srav_epi32&expand=5465)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srav_epi32&expand=5465)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsravd))]
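
Unlike the `srai` immediate forms, `_mm512_srav_epi32` reads a per-lane shift count from `count`; per the Intel guide, counts of 32 or more fill the lane with sign bits. A one-lane scalar sketch (illustrative helper, not a stdarch API):

// Scalar model of one lane of _mm512_srav_epi32.
fn srav_epi32_lane(x: i32, count: u32) -> i32 {
    // Counts of 32 or more saturate to a full sign fill.
    if count >= 32 { x >> 31 } else { x >> count }
}
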
@@ -18380,7 +18380,7 @@ pub unsafe fn _mm512_srav_epi32(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srav_epi32&expand=5463)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srav_epi32&expand=5463)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -18396,7 +18396,7 @@ pub unsafe fn _mm512_mask_srav_epi32(
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srav_epi32&expand=5464)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srav_epi32&expand=5464)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -18408,7 +18408,7 @@ pub unsafe fn _mm512_maskz_srav_epi32(k: __mmask16, a: __m512i, count: __m512i)
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srav_epi32&expand=5460)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srav_epi32&expand=5460)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -18424,7 +18424,7 @@ pub unsafe fn _mm256_mask_srav_epi32(
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srav_epi32&expand=5461)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srav_epi32&expand=5461)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -18436,7 +18436,7 @@ pub unsafe fn _mm256_maskz_srav_epi32(k: __mmask8, a: __m256i, count: __m256i) -
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srav_epi32&expand=5457)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srav_epi32&expand=5457)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -18452,7 +18452,7 @@ pub unsafe fn _mm_mask_srav_epi32(
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srav_epi32&expand=5458)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srav_epi32&expand=5458)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravd))]
@@ -18464,7 +18464,7 @@ pub unsafe fn _mm_maskz_srav_epi32(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srav_epi64&expand=5474)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srav_epi64&expand=5474)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18474,7 +18474,7 @@ pub unsafe fn _mm512_srav_epi64(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srav_epi64&expand=5472)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srav_epi64&expand=5472)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18490,7 +18490,7 @@ pub unsafe fn _mm512_mask_srav_epi64(
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srav_epi64&expand=5473)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srav_epi64&expand=5473)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18502,7 +18502,7 @@ pub unsafe fn _mm512_maskz_srav_epi64(k: __mmask8, a: __m512i, count: __m512i) -
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_srav_epi64&expand=5471)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_srav_epi64&expand=5471)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18512,7 +18512,7 @@ pub unsafe fn _mm256_srav_epi64(a: __m256i, count: __m256i) -> __m256i {
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srav_epi64&expand=5469)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srav_epi64&expand=5469)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18528,7 +18528,7 @@ pub unsafe fn _mm256_mask_srav_epi64(
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srav_epi64&expand=5470)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srav_epi64&expand=5470)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18540,7 +18540,7 @@ pub unsafe fn _mm256_maskz_srav_epi64(k: __mmask8, a: __m256i, count: __m256i) -
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srav_epi64&expand=5468)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srav_epi64&expand=5468)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18550,7 +18550,7 @@ pub unsafe fn _mm_srav_epi64(a: __m128i, count: __m128i) -> __m128i {
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srav_epi64&expand=5466)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srav_epi64&expand=5466)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18566,7 +18566,7 @@ pub unsafe fn _mm_mask_srav_epi64(
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in sign bits, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srav_epi64&expand=5467)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srav_epi64&expand=5467)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsravq))]
@@ -18578,7 +18578,7 @@ pub unsafe fn _mm_maskz_srav_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rolv_epi32&expand=4703)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rolv_epi32&expand=4703)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolvd))]
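
The variable rotates take each lane's count modulo the lane width, so no bits are discarded. A one-lane sketch of `_mm512_rolv_epi32` using Rust's wrapping rotate (illustrative helper; `u32::rotate_left` already reduces the count modulo 32):

// Scalar model of one lane of _mm512_rolv_epi32.
fn rolv_epi32_lane(x: u32, count: u32) -> u32 {
    x.rotate_left(count % 32) // e.g. rolv(0x8000_0001, 1) == 0x0000_0003
}
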
@@ -18588,7 +18588,7 @@ pub unsafe fn _mm512_rolv_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rolv_epi32&expand=4701)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rolv_epi32&expand=4701)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18604,7 +18604,7 @@ pub unsafe fn _mm512_mask_rolv_epi32(
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rolv_epi32&expand=4702)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rolv_epi32&expand=4702)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18616,7 +18616,7 @@ pub unsafe fn _mm512_maskz_rolv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> _
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rolv_epi32&expand=4700)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rolv_epi32&expand=4700)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18626,7 +18626,7 @@ pub unsafe fn _mm256_rolv_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rolv_epi3&expand=4698)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rolv_epi32&expand=4698)
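
(Note: the Intel-guide fragment in the original source read `_mm256_mask_rolv_epi3`, which 404s in the guide's search; the corrected fragment above assumes the intended target is `_mm256_mask_rolv_epi32`, matching the function name.)
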
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18637,7 +18637,7 @@ pub unsafe fn _mm256_mask_rolv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: _
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rolv_epi32&expand=4699)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rolv_epi32&expand=4699)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18649,7 +18649,7 @@ pub unsafe fn _mm256_maskz_rolv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rolv_epi32&expand=4697)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rolv_epi32&expand=4697)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18659,7 +18659,7 @@ pub unsafe fn _mm_rolv_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rolv_epi32&expand=4695)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rolv_epi32&expand=4695)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18670,7 +18670,7 @@ pub unsafe fn _mm_mask_rolv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Rotate the bits in each packed 32-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rolv_epi32&expand=4696)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rolv_epi32&expand=4696)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvd))]
@@ -18682,7 +18682,7 @@ pub unsafe fn _mm_maskz_rolv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rorv_epi32&expand=4739)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rorv_epi32&expand=4739)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprorvd))]
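
`_mm512_rorv_epi32` is the mirror image, rotating right by each lane's count modulo 32 (illustrative one-lane sketch):

// Scalar model of one lane of _mm512_rorv_epi32.
fn rorv_epi32_lane(x: u32, count: u32) -> u32 {
    x.rotate_right(count % 32) // e.g. rorv(0x0000_0003, 1) == 0x8000_0001
}
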
@@ -18692,7 +18692,7 @@ pub unsafe fn _mm512_rorv_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rorv_epi32&expand=4737)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rorv_epi32&expand=4737)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18708,7 +18708,7 @@ pub unsafe fn _mm512_mask_rorv_epi32(
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rorv_epi32&expand=4738)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rorv_epi32&expand=4738)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18720,7 +18720,7 @@ pub unsafe fn _mm512_maskz_rorv_epi32(k: __mmask16, a: __m512i, b: __m512i) -> _
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rorv_epi32&expand=4736)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rorv_epi32&expand=4736)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18730,7 +18730,7 @@ pub unsafe fn _mm256_rorv_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rorv_epi32&expand=4734)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rorv_epi32&expand=4734)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18741,7 +18741,7 @@ pub unsafe fn _mm256_mask_rorv_epi32(src: __m256i, k: __mmask8, a: __m256i, b: _
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rorv_epi32&expand=4735)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rorv_epi32&expand=4735)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18753,7 +18753,7 @@ pub unsafe fn _mm256_maskz_rorv_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rorv_epi32&expand=4733)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rorv_epi32&expand=4733)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18763,7 +18763,7 @@ pub unsafe fn _mm_rorv_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rorv_epi32&expand=4731)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rorv_epi32&expand=4731)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18774,7 +18774,7 @@ pub unsafe fn _mm_mask_rorv_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Rotate the bits in each packed 32-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rorv_epi32&expand=4732)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rorv_epi32&expand=4732)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvd))]
@@ -18786,7 +18786,7 @@ pub unsafe fn _mm_maskz_rorv_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rolv_epi64&expand=4712)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rolv_epi64&expand=4712)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18796,7 +18796,7 @@ pub unsafe fn _mm512_rolv_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rolv_epi64&expand=4710)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rolv_epi64&expand=4710)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18807,7 +18807,7 @@ pub unsafe fn _mm512_mask_rolv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: _
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rolv_epi64&expand=4711)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rolv_epi64&expand=4711)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18819,7 +18819,7 @@ pub unsafe fn _mm512_maskz_rolv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rolv_epi64&expand=4709)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rolv_epi64&expand=4709)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18829,7 +18829,7 @@ pub unsafe fn _mm256_rolv_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rolv_epi64&expand=4707)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rolv_epi64&expand=4707)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18840,7 +18840,7 @@ pub unsafe fn _mm256_mask_rolv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: _
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rolv_epi64&expand=4708)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rolv_epi64&expand=4708)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18852,7 +18852,7 @@ pub unsafe fn _mm256_maskz_rolv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rolv_epi64&expand=4706)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rolv_epi64&expand=4706)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18862,7 +18862,7 @@ pub unsafe fn _mm_rolv_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rolv_epi64&expand=4704)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rolv_epi64&expand=4704)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18873,7 +18873,7 @@ pub unsafe fn _mm_mask_rolv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Rotate the bits in each packed 64-bit integer in a to the left by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rolv_epi64&expand=4705)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rolv_epi64&expand=4705)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprolvq))]
@@ -18885,7 +18885,7 @@ pub unsafe fn _mm_maskz_rolv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rorv_epi64&expand=4748)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_rorv_epi64&expand=4748)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18895,7 +18895,7 @@ pub unsafe fn _mm512_rorv_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rorv_epi64&expand=4746)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_rorv_epi64&expand=4746)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18906,7 +18906,7 @@ pub unsafe fn _mm512_mask_rorv_epi64(src: __m512i, k: __mmask8, a: __m512i, b: _
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rorv_epi64&expand=4747)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_rorv_epi64&expand=4747)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18918,7 +18918,7 @@ pub unsafe fn _mm512_maskz_rorv_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rorv_epi64&expand=4745)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_rorv_epi64&expand=4745)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18928,7 +18928,7 @@ pub unsafe fn _mm256_rorv_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rorv_epi64&expand=4743)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_rorv_epi64&expand=4743)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18939,7 +18939,7 @@ pub unsafe fn _mm256_mask_rorv_epi64(src: __m256i, k: __mmask8, a: __m256i, b: _
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rorv_epi64&expand=4744)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_rorv_epi64&expand=4744)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18951,7 +18951,7 @@ pub unsafe fn _mm256_maskz_rorv_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rorv_epi64&expand=4742)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rorv_epi64&expand=4742)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18961,7 +18961,7 @@ pub unsafe fn _mm_rorv_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rorv_epi64&expand=4740)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_rorv_epi64&expand=4740)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18972,7 +18972,7 @@ pub unsafe fn _mm_mask_rorv_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m1
/// Rotate the bits in each packed 64-bit integer in a to the right by the number of bits specified in the corresponding element of b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rorv_epi64&expand=4741)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_rorv_epi64&expand=4741)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vprorvq))]
@@ -18984,7 +18984,7 @@ pub unsafe fn _mm_maskz_rorv_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sllv_epi32&expand=5342)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sllv_epi32&expand=5342)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllvd))]
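
The logical variable shifts behave differently from the rotates: per the Intel guide, a per-lane count of 32 or more zeroes the lane entirely. A one-lane sketch of `_mm512_sllv_epi32` (illustrative helper, not a stdarch API):

// Scalar model of one lane of _mm512_sllv_epi32.
fn sllv_epi32_lane(x: u32, count: u32) -> u32 {
    // Oversized counts produce 0, not a wrapped shift.
    if count >= 32 { 0 } else { x << count }
}
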
@@ -18994,7 +18994,7 @@ pub unsafe fn _mm512_sllv_epi32(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sllv_epi32&expand=5340)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sllv_epi32&expand=5340)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -19010,7 +19010,7 @@ pub unsafe fn _mm512_mask_sllv_epi32(
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sllv_epi32&expand=5341)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sllv_epi32&expand=5341)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -19022,7 +19022,7 @@ pub unsafe fn _mm512_maskz_sllv_epi32(k: __mmask16, a: __m512i, count: __m512i)
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sllv_epi32&expand=5337)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sllv_epi32&expand=5337)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -19038,7 +19038,7 @@ pub unsafe fn _mm256_mask_sllv_epi32(
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sllv_epi32&expand=5338)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sllv_epi32&expand=5338)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -19050,7 +19050,7 @@ pub unsafe fn _mm256_maskz_sllv_epi32(k: __mmask8, a: __m256i, count: __m256i) -
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sllv_epi32&expand=5334)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sllv_epi32&expand=5334)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -19066,7 +19066,7 @@ pub unsafe fn _mm_mask_sllv_epi32(
/// Shift packed 32-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sllv_epi32&expand=5335)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sllv_epi32&expand=5335)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvd))]
@@ -19078,7 +19078,7 @@ pub unsafe fn _mm_maskz_sllv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srlv_epi32&expand=5554)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srlv_epi32&expand=5554)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
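
`_mm512_srlv_epi32` mirrors this on the right: zeros are shifted in, and oversized counts clear the lane (illustrative one-lane sketch):

// Scalar model of one lane of _mm512_srlv_epi32.
fn srlv_epi32_lane(x: u32, count: u32) -> u32 {
    if count >= 32 { 0 } else { x >> count }
}
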
@@ -19088,7 +19088,7 @@ pub unsafe fn _mm512_srlv_epi32(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srlv_epi32&expand=5552)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srlv_epi32&expand=5552)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -19104,7 +19104,7 @@ pub unsafe fn _mm512_mask_srlv_epi32(
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srlv_epi32&expand=5553)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srlv_epi32&expand=5553)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -19116,7 +19116,7 @@ pub unsafe fn _mm512_maskz_srlv_epi32(k: __mmask16, a: __m512i, count: __m512i)
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srlv_epi32&expand=5549)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srlv_epi32&expand=5549)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -19132,7 +19132,7 @@ pub unsafe fn _mm256_mask_srlv_epi32(
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srlv_epi32&expand=5550)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srlv_epi32&expand=5550)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -19144,7 +19144,7 @@ pub unsafe fn _mm256_maskz_srlv_epi32(k: __mmask8, a: __m256i, count: __m256i) -
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srlv_epi32&expand=5546)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srlv_epi32&expand=5546)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -19160,7 +19160,7 @@ pub unsafe fn _mm_mask_srlv_epi32(
/// Shift packed 32-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srlv_epi32&expand=5547)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srlv_epi32&expand=5547)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvd))]
@@ -19172,7 +19172,7 @@ pub unsafe fn _mm_maskz_srlv_epi32(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sllv_epi64&expand=5351)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_sllv_epi64&expand=5351)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19182,7 +19182,7 @@ pub unsafe fn _mm512_sllv_epi64(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sllv_epi64&expand=5349)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_sllv_epi64&expand=5349)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19198,7 +19198,7 @@ pub unsafe fn _mm512_mask_sllv_epi64(
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sllv_epi64&expand=5350)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_sllv_epi64&expand=5350)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19210,7 +19210,7 @@ pub unsafe fn _mm512_maskz_sllv_epi64(k: __mmask8, a: __m512i, count: __m512i) -
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sllv_epi64&expand=5346)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_sllv_epi64&expand=5346)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19226,7 +19226,7 @@ pub unsafe fn _mm256_mask_sllv_epi64(
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sllv_epi64&expand=5347)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_sllv_epi64&expand=5347)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19238,7 +19238,7 @@ pub unsafe fn _mm256_maskz_sllv_epi64(k: __mmask8, a: __m256i, count: __m256i) -
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sllv_epi64&expand=5343)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_sllv_epi64&expand=5343)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19254,7 +19254,7 @@ pub unsafe fn _mm_mask_sllv_epi64(
/// Shift packed 64-bit integers in a left by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sllv_epi64&expand=5344)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_sllv_epi64&expand=5344)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsllvq))]
@@ -19266,7 +19266,7 @@ pub unsafe fn _mm_maskz_sllv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_srlv_epi64&expand=5563)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_srlv_epi64&expand=5563)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -19276,7 +19276,7 @@ pub unsafe fn _mm512_srlv_epi64(a: __m512i, count: __m512i) -> __m512i {
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_srlv_epi64&expand=5561)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_srlv_epi64&expand=5561)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -19292,7 +19292,7 @@ pub unsafe fn _mm512_mask_srlv_epi64(
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_srlv_epi64&expand=5562)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_srlv_epi64&expand=5562)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -19304,7 +19304,7 @@ pub unsafe fn _mm512_maskz_srlv_epi64(k: __mmask8, a: __m512i, count: __m512i) -
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_srlv_epi64&expand=5558)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_srlv_epi64&expand=5558)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -19320,7 +19320,7 @@ pub unsafe fn _mm256_mask_srlv_epi64(
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_srlv_epi64&expand=5559)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_srlv_epi64&expand=5559)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -19332,7 +19332,7 @@ pub unsafe fn _mm256_maskz_srlv_epi64(k: __mmask8, a: __m256i, count: __m256i) -
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_srlv_epi64&expand=5555)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_srlv_epi64&expand=5555)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
@@ -19348,7 +19348,7 @@ pub unsafe fn _mm_mask_srlv_epi64(
/// Shift packed 64-bit integers in a right by the amount specified by the corresponding element in count while shifting in zeros, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_srlv_epi64&expand=5556)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_srlv_epi64&expand=5556)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpsrlvq))]
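// Hypothetical sketch of the writemask (`mask_`) counterpart, under the
// same assumptions as above: lanes with a clear mask bit are copied from
// `src` instead of being zeroed.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn mask_srlv_demo() {
    use core::arch::x86_64::*;
    let src = _mm512_set1_epi64(-1);
    let a = _mm512_set1_epi64(16);
    let count = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let r = _mm512_mask_srlv_epi64(src, 0b1111_0000, a, count);
    let lanes: [i64; 8] = core::mem::transmute(r);
    // Lanes 0..4 keep `src` (-1); lanes 4..8 hold 16 >> count.
    assert_eq!(lanes, [-1, -1, -1, -1, 1, 0, 0, 0]);
}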
@@ -19360,17 +19360,17 @@ pub unsafe fn _mm_maskz_srlv_epi64(k: __mmask8, a: __m128i, count: __m128i) -> _
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permute_ps&expand=4170)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permute_ps&expand=4170)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_permute_ps<const MASK: i32>(a: __m512) -> __m512 {
- static_assert_imm8!(MASK);
- simd_shuffle16!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
a,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11),
@@ -19393,7 +19393,7 @@ pub unsafe fn _mm512_permute_ps<const MASK: i32>(a: __m512) -> __m512 {
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permute_ps&expand=4168)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permute_ps&expand=4168)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
@@ -19403,20 +19403,20 @@ pub unsafe fn _mm512_mask_permute_ps<const MASK: i32>(
k: __mmask16,
a: __m512,
) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_permute_ps::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16()))
}
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permute_ps&expand=4169)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permute_ps&expand=4169)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_permute_ps<const MASK: i32>(k: __mmask16, a: __m512) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_permute_ps::<MASK>(a);
let zero = _mm512_setzero_ps().as_f32x16();
transmute(simd_select_bitmask(k, r.as_f32x16(), zero))
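// The hunks above are representative of the whole patch: the
// width-specific `simd_shuffle16!`/`simd_shuffle8!`/`simd_shuffle4!`
// macros become the generic `simd_shuffle!` (the lane count is now
// inferred from the index array), and `static_assert_imm8!(MASK)` becomes
// `static_assert_uimm_bits!(MASK, 8)`, asserting that MASK fits in 8
// unsigned bits. Behaviour is unchanged. A hypothetical sketch of the
// intrinsic itself, same nightly/avx512f assumptions as above: the imm8
// carries two control bits per destination element, applied within each
// 128-bit lane.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn permute_ps_demo() {
    use core::arch::x86_64::*;
    let a = _mm512_setr_ps(
        0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.,
    );
    // 0b00_01_10_11 selects elements 3, 2, 1, 0 of every 128-bit lane.
    let r = _mm512_permute_ps::<0b00_01_10_11>(a);
    let lanes: [f32; 16] = core::mem::transmute(r);
    assert_eq!(
        lanes,
        [3., 2., 1., 0., 7., 6., 5., 4., 11., 10., 9., 8., 15., 14., 13., 12.]
    );
}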
@@ -19424,7 +19424,7 @@ pub unsafe fn _mm512_maskz_permute_ps<const MASK: i32>(k: __mmask16, a: __m512)
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permute_ps&expand=4165)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permute_ps&expand=4165)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
@@ -19440,7 +19440,7 @@ pub unsafe fn _mm256_mask_permute_ps<const MASK: i32>(
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permute_ps&expand=4166)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permute_ps&expand=4166)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
@@ -19453,7 +19453,7 @@ pub unsafe fn _mm256_maskz_permute_ps<const MASK: i32>(k: __mmask8, a: __m256) -
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permute_ps&expand=4162)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permute_ps&expand=4162)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
@@ -19465,7 +19465,7 @@ pub unsafe fn _mm_mask_permute_ps<const MASK: i32>(src: __m128, k: __mmask8, a:
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permute_ps&expand=4163)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permute_ps&expand=4163)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 0b11_00_01_11))]
@@ -19478,17 +19478,17 @@ pub unsafe fn _mm_maskz_permute_ps<const MASK: i32>(k: __mmask8, a: __m128) -> _
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permute_pd&expand=4161)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permute_pd&expand=4161)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_permute_pd<const MASK: i32>(a: __m512d) -> __m512d {
- static_assert_imm8!(MASK);
- simd_shuffle8!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
a,
- <const MASK: i32> [
+ [
MASK as u32 & 0b1,
((MASK as u32 >> 1) & 0b1),
((MASK as u32 >> 2) & 0b1) + 2,
@@ -19503,7 +19503,7 @@ pub unsafe fn _mm512_permute_pd<const MASK: i32>(a: __m512d) -> __m512d {
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permute_pd&expand=4159)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permute_pd&expand=4159)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))]
@@ -19513,20 +19513,20 @@ pub unsafe fn _mm512_mask_permute_pd<const MASK: i32>(
k: __mmask8,
a: __m512d,
) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_permute_pd::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8()))
}
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permute_pd&expand=4160)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permute_pd&expand=4160)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01_10_01))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m512d) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_permute_pd::<MASK>(a);
let zero = _mm512_setzero_pd().as_f64x8();
transmute(simd_select_bitmask(k, r.as_f64x8(), zero))
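// For the `pd` variants the imm8 carries one control bit per element:
// bit i selects the low (0) or high (1) double of the 128-bit lane that
// destination element i lives in. Another hypothetical sketch, same
// assumptions:
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn permute_pd_demo() {
    use core::arch::x86_64::*;
    let a = _mm512_setr_pd(0., 1., 2., 3., 4., 5., 6., 7.);
    // 0b0101_0101 swaps the two doubles of every 128-bit lane.
    let r = _mm512_permute_pd::<0b0101_0101>(a);
    let lanes: [f64; 8] = core::mem::transmute(r);
    assert_eq!(lanes, [1., 0., 3., 2., 5., 4., 7., 6.]);
}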
@@ -19534,7 +19534,7 @@ pub unsafe fn _mm512_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m512d)
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permute_pd&expand=4156)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permute_pd&expand=4156)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01))]
@@ -19544,20 +19544,20 @@ pub unsafe fn _mm256_mask_permute_pd<const MASK: i32>(
k: __mmask8,
a: __m256d,
) -> __m256d {
- static_assert_imm4!(MASK);
+ static_assert_uimm_bits!(MASK, 4);
let r = _mm256_permute_pd::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4()))
}
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permute_pd&expand=4157)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permute_pd&expand=4157)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd, MASK = 0b11_01))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m256d) -> __m256d {
- static_assert_imm4!(MASK);
+ static_assert_uimm_bits!(MASK, 4);
let r = _mm256_permute_pd::<MASK>(a);
let zero = _mm256_setzero_pd().as_f64x4();
transmute(simd_select_bitmask(k, r.as_f64x4(), zero))
@@ -19565,7 +19565,7 @@ pub unsafe fn _mm256_maskz_permute_pd<const MASK: i32>(k: __mmask8, a: __m256d)
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permute_pd&expand=4153)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permute_pd&expand=4153)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0b01))]
@@ -19575,20 +19575,20 @@ pub unsafe fn _mm_mask_permute_pd<const IMM2: i32>(
k: __mmask8,
a: __m128d,
) -> __m128d {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
let r = _mm_permute_pd::<IMM2>(a);
transmute(simd_select_bitmask(k, r.as_f64x2(), src.as_f64x2()))
}
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permute_pd&expand=4154)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permute_pd&expand=4154)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0b01))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_maskz_permute_pd<const IMM2: i32>(k: __mmask8, a: __m128d) -> __m128d {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
let r = _mm_permute_pd::<IMM2>(a);
let zero = _mm_setzero_pd().as_f64x2();
transmute(simd_select_bitmask(k, r.as_f64x2(), zero))
@@ -19596,17 +19596,17 @@ pub unsafe fn _mm_maskz_permute_pd<const IMM2: i32>(k: __mmask8, a: __m128d) ->
/// Shuffle 64-bit integers in a within 256-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex_epi64&expand=4208)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex_epi64&expand=4208)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_permutex_epi64<const MASK: i32>(a: __m512i) -> __m512i {
- static_assert_imm8!(MASK);
- simd_shuffle8!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
a,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11),
@@ -19621,7 +19621,7 @@ pub unsafe fn _mm512_permutex_epi64<const MASK: i32>(a: __m512i) -> __m512i {
/// Shuffle 64-bit integers in a within 256-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex_epi64&expand=4206)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex_epi64&expand=4206)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq
@@ -19631,20 +19631,20 @@ pub unsafe fn _mm512_mask_permutex_epi64<const MASK: i32>(
k: __mmask8,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_permutex_epi64::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8()))
}
/// Shuffle 64-bit integers in a within 256-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex_epi64&expand=4207)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex_epi64&expand=4207)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_permutex_epi64<const MASK: i32>(k: __mmask8, a: __m512i) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_permutex_epi64::<MASK>(a);
let zero = _mm512_setzero_si512().as_i64x8();
transmute(simd_select_bitmask(k, r.as_i64x8(), zero))
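// `permutex_*` (immediate control, no `var`) reorders the four 64-bit
// elements inside each 256-bit half independently, two index bits per
// element. Hypothetical sketch, same assumptions:
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn permutex_epi64_demo() {
    use core::arch::x86_64::*;
    let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    // 0b00_01_10_11 reverses the four elements of each 256-bit half.
    let r = _mm512_permutex_epi64::<0b00_01_10_11>(a);
    let lanes: [i64; 8] = core::mem::transmute(r);
    assert_eq!(lanes, [3, 2, 1, 0, 7, 6, 5, 4]);
}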
@@ -19652,17 +19652,17 @@ pub unsafe fn _mm512_maskz_permutex_epi64<const MASK: i32>(k: __mmask8, a: __m51
/// Shuffle 64-bit integers in a within 256-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex_epi64&expand=4205)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex_epi64&expand=4205)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_permutex_epi64<const MASK: i32>(a: __m256i) -> __m256i {
- static_assert_imm8!(MASK);
- simd_shuffle4!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
a,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11),
@@ -19673,7 +19673,7 @@ pub unsafe fn _mm256_permutex_epi64<const MASK: i32>(a: __m256i) -> __m256i {
/// Shuffle 64-bit integers in a within 256-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex_epi6&expand=4203)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex_epi64&expand=4203)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq
@@ -19683,20 +19683,20 @@ pub unsafe fn _mm256_mask_permutex_epi64<const MASK: i32>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_permutex_epi64::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4()))
}
/// Shuffle 64-bit integers in a within 256-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex_epi64&expand=4204)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex_epi64&expand=4204)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermq
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_permutex_epi64<const MASK: i32>(k: __mmask8, a: __m256i) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_permutex_epi64::<MASK>(a);
let zero = _mm256_setzero_si256().as_i64x4();
transmute(simd_select_bitmask(k, r.as_i64x4(), zero))
@@ -19704,17 +19704,17 @@ pub unsafe fn _mm256_maskz_permutex_epi64<const MASK: i32>(k: __mmask8, a: __m25
/// Shuffle double-precision (64-bit) floating-point elements in a within 256-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex_pd&expand=4214)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex_pd&expand=4214)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_permutex_pd<const MASK: i32>(a: __m512d) -> __m512d {
- static_assert_imm8!(MASK);
- simd_shuffle8!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
a,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11),
@@ -19729,7 +19729,7 @@ pub unsafe fn _mm512_permutex_pd<const MASK: i32>(a: __m512d) -> __m512d {
/// Shuffle double-precision (64-bit) floating-point elements in a within 256-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex_pd&expand=4212)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex_pd&expand=4212)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd
@@ -19745,7 +19745,7 @@ pub unsafe fn _mm512_mask_permutex_pd<const MASK: i32>(
/// Shuffle double-precision (64-bit) floating-point elements in a within 256-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex_pd&expand=4213)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex_pd&expand=4213)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd
@@ -19758,17 +19758,17 @@ pub unsafe fn _mm512_maskz_permutex_pd<const MASK: i32>(k: __mmask8, a: __m512d)
/// Shuffle double-precision (64-bit) floating-point elements in a within 256-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex_pd&expand=4211)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex_pd&expand=4211)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_permutex_pd<const MASK: i32>(a: __m256d) -> __m256d {
- static_assert_imm8!(MASK);
- simd_shuffle4!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
a,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11),
@@ -19779,7 +19779,7 @@ pub unsafe fn _mm256_permutex_pd<const MASK: i32>(a: __m256d) -> __m256d {
/// Shuffle double-precision (64-bit) floating-point elements in a within 256-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex_pd&expand=4209)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex_pd&expand=4209)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd
@@ -19789,20 +19789,20 @@ pub unsafe fn _mm256_mask_permutex_pd<const MASK: i32>(
k: __mmask8,
a: __m256d,
) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_permutex_pd::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4()))
}
/// Shuffle double-precision (64-bit) floating-point elements in a within 256-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex_pd&expand=4210)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex_pd&expand=4210)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b10_01_10_11))] //should be vpermpd
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_permutex_pd<const MASK: i32>(k: __mmask8, a: __m256d) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_permutex_pd::<MASK>(a);
let zero = _mm256_setzero_pd().as_f64x4();
transmute(simd_select_bitmask(k, r.as_f64x4(), zero))
@@ -19810,7 +19810,7 @@ pub unsafe fn _mm256_maskz_permutex_pd<const MASK: i32>(k: __mmask8, a: __m256d)
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst. Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the permutevar name. This intrinsic is identical to _mm512_permutexvar_epi32, and it is recommended that you use that intrinsic name.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutevar_epi32&expand=4182)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_epi32&expand=4182)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermd
@@ -19820,7 +19820,7 @@ pub unsafe fn _mm512_permutevar_epi32(idx: __m512i, a: __m512i) -> __m512i {
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the permutevar name. This intrinsic is identical to _mm512_mask_permutexvar_epi32, and it is recommended that you use that intrinsic name.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutevar_epi32&expand=4181)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutevar_epi32&expand=4181)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermd))]
@@ -19836,7 +19836,7 @@ pub unsafe fn _mm512_mask_permutevar_epi32(
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutevar_ps&expand=4200)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_ps&expand=4200)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19846,7 +19846,7 @@ pub unsafe fn _mm512_permutevar_ps(a: __m512, b: __m512i) -> __m512 {
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutevar_ps&expand=4198)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutevar_ps&expand=4198)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19862,7 +19862,7 @@ pub unsafe fn _mm512_mask_permutevar_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutevar_ps&expand=4199)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutevar_ps&expand=4199)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19874,7 +19874,7 @@ pub unsafe fn _mm512_maskz_permutevar_ps(k: __mmask16, a: __m512, b: __m512i) ->
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm256_mask_permutevar_ps&expand=4195)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutevar_ps&expand=4195)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19885,7 +19885,7 @@ pub unsafe fn _mm256_mask_permutevar_ps(src: __m256, k: __mmask8, a: __m256, b:
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutevar_ps&expand=4196)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutevar_ps&expand=4196)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19897,7 +19897,7 @@ pub unsafe fn _mm256_maskz_permutevar_ps(k: __mmask8, a: __m256, b: __m256i) ->
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutevar_ps&expand=4192)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutevar_ps&expand=4192)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19908,7 +19908,7 @@ pub unsafe fn _mm_mask_permutevar_ps(src: __m128, k: __mmask8, a: __m128, b: __m
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutevar_ps&expand=4193)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutevar_ps&expand=4193)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilps))]
@@ -19920,7 +19920,7 @@ pub unsafe fn _mm_maskz_permutevar_ps(k: __mmask8, a: __m128, b: __m128i) -> __m
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutevar_pd&expand=4191)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutevar_pd&expand=4191)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -19930,7 +19930,7 @@ pub unsafe fn _mm512_permutevar_pd(a: __m512d, b: __m512i) -> __m512d {
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutevar_pd&expand=4189)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutevar_pd&expand=4189)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -19946,7 +19946,7 @@ pub unsafe fn _mm512_mask_permutevar_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutevar_pd&expand=4190)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutevar_pd&expand=4190)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -19958,7 +19958,7 @@ pub unsafe fn _mm512_maskz_permutevar_pd(k: __mmask8, a: __m512d, b: __m512i) ->
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutevar_pd&expand=4186)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutevar_pd&expand=4186)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -19974,7 +19974,7 @@ pub unsafe fn _mm256_mask_permutevar_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutevar_pd&expand=4187)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutevar_pd&expand=4187)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -19986,7 +19986,7 @@ pub unsafe fn _mm256_maskz_permutevar_pd(k: __mmask8, a: __m256d, b: __m256i) ->
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutevar_pd&expand=4183)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutevar_pd&expand=4183)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd))]
@@ -19997,7 +19997,7 @@ pub unsafe fn _mm_mask_permutevar_pd(src: __m128d, k: __mmask8, a: __m128d, b: _
/// Shuffle double-precision (64-bit) floating-point elements in a within 128-bit lanes using the control in b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutevar_pd&expand=4184)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutevar_pd&expand=4184)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermilpd))]
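// The `permutevar_*` family takes its shuffle control from a vector
// rather than an immediate: the low two bits of each 32-bit element of
// `b` select a float within that element's own 128-bit lane.
// Hypothetical sketch, same assumptions:
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn permutevar_ps_demo() {
    use core::arch::x86_64::*;
    let a = _mm512_setr_ps(
        0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.,
    );
    // All-zero selectors broadcast element 0 of every 128-bit lane.
    let b = _mm512_setzero_si512();
    let r = _mm512_permutevar_ps(a, b);
    let lanes: [f32; 16] = core::mem::transmute(r);
    assert_eq!(
        lanes,
        [0., 0., 0., 0., 4., 4., 4., 4., 8., 8., 8., 8., 12., 12., 12., 12.]
    );
}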
@@ -20009,7 +20009,7 @@ pub unsafe fn _mm_maskz_permutevar_pd(k: __mmask8, a: __m128d, b: __m128i) -> __
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_epi32&expand=4301)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi32&expand=4301)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermd
@@ -20019,7 +20019,7 @@ pub unsafe fn _mm512_permutexvar_epi32(idx: __m512i, a: __m512i) -> __m512i {
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutexvar_epi32&expand=4299)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi32&expand=4299)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermd))]
@@ -20035,7 +20035,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi32(
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutexvar_epi32&expand=4300)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi32&expand=4300)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermd))]
@@ -20047,7 +20047,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi32(k: __mmask16, idx: __m512i, a: __m5
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_epi32&expand=4298)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi32&expand=4298)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermd
@@ -20057,7 +20057,7 @@ pub unsafe fn _mm256_permutexvar_epi32(idx: __m256i, a: __m256i) -> __m256i {
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutexvar_epi32&expand=4296)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi32&expand=4296)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermd))]
@@ -20073,7 +20073,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi32(
/// Shuffle 32-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutexvar_epi32&expand=4297)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi32&expand=4297)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermd))]
@@ -20085,7 +20085,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi32(k: __mmask8, idx: __m256i, a: __m25
/// Shuffle 64-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_epi64&expand=4307)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi64&expand=4307)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermq
@@ -20095,7 +20095,7 @@ pub unsafe fn _mm512_permutexvar_epi64(idx: __m512i, a: __m512i) -> __m512i {
/// Shuffle 64-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutexvar_epi64&expand=4305)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi64&expand=4305)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermq))]
@@ -20111,7 +20111,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi64(
/// Shuffle 64-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutexvar_epi64&expand=4306)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi64&expand=4306)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermq))]
@@ -20123,7 +20123,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi64(k: __mmask8, idx: __m512i, a: __m51
/// Shuffle 64-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_epi64&expand=4304)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi64&expand=4304)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermq
@@ -20133,7 +20133,7 @@ pub unsafe fn _mm256_permutexvar_epi64(idx: __m256i, a: __m256i) -> __m256i {
/// Shuffle 64-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutexvar_epi64&expand=4302)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi64&expand=4302)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermq))]
@@ -20149,7 +20149,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi64(
/// Shuffle 64-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutexvar_epi64&expand=4303)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi64&expand=4303)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermq))]
@@ -20161,7 +20161,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi64(k: __mmask8, idx: __m256i, a: __m25
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutevar_ps&expand=4200)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_ps&expand=4328)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -20171,7 +20171,7 @@ pub unsafe fn _mm512_permutexvar_ps(idx: __m512i, a: __m512) -> __m512 {
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutexvar_ps&expand=4326)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_ps&expand=4326)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -20187,7 +20187,7 @@ pub unsafe fn _mm512_mask_permutexvar_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutexvar_ps&expand=4327)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_ps&expand=4327)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -20199,7 +20199,7 @@ pub unsafe fn _mm512_maskz_permutexvar_ps(k: __mmask16, idx: __m512i, a: __m512)
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ps&expand=4325)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_ps&expand=4325)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -20209,7 +20209,7 @@ pub unsafe fn _mm256_permutexvar_ps(idx: __m256i, a: __m256) -> __m256 {
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutexvar_ps&expand=4323)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_ps&expand=4323)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -20225,7 +20225,7 @@ pub unsafe fn _mm256_mask_permutexvar_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutexvar_ps&expand=4324)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_ps&expand=4324)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermps))]
@@ -20237,7 +20237,7 @@ pub unsafe fn _mm256_maskz_permutexvar_ps(k: __mmask8, idx: __m256i, a: __m256)
/// Shuffle double-precision (64-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_pd&expand=4322)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_pd&expand=4322)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermpd))]
@@ -20247,7 +20247,7 @@ pub unsafe fn _mm512_permutexvar_pd(idx: __m512i, a: __m512d) -> __m512d {
/// Shuffle double-precision (64-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutexvar_pd&expand=4320)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_pd&expand=4320)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermpd))]
@@ -20263,7 +20263,7 @@ pub unsafe fn _mm512_mask_permutexvar_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutexvar_pd&expand=4321)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_pd&expand=4321)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermpd))]
@@ -20275,7 +20275,7 @@ pub unsafe fn _mm512_maskz_permutexvar_pd(k: __mmask8, idx: __m512i, a: __m512d)
/// Shuffle double-precision (64-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_pd&expand=4319)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_pd&expand=4319)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermpd))]
@@ -20285,7 +20285,7 @@ pub unsafe fn _mm256_permutexvar_pd(idx: __m256i, a: __m256d) -> __m256d {
/// Shuffle double-precision (64-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutexvar_pd&expand=4317)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_pd&expand=4317)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermpd))]
@@ -20301,7 +20301,7 @@ pub unsafe fn _mm256_mask_permutexvar_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutexvar_pd&expand=4318)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_pd&expand=4318)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermpd))]
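// `permutexvar_*` shuffles across the full 512-bit register: each element
// of `idx` (its low four bits, for the 16-element forms) can pick any
// element of `a`. Hypothetical sketch, same assumptions:
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn permutexvar_epi32_demo() {
    use core::arch::x86_64::*;
    let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    // Reverse the whole vector, which the lane-local permutes cannot do.
    let idx = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
    let r = _mm512_permutexvar_epi32(idx, a);
    let lanes: [i32; 16] = core::mem::transmute(r);
    assert_eq!(lanes, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
}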
@@ -20313,7 +20313,7 @@ pub unsafe fn _mm256_maskz_permutexvar_pd(k: __mmask8, idx: __m256i, a: __m256d)
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_epi32&expand=4238)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi32&expand=4238)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d
@@ -20323,7 +20323,7 @@ pub unsafe fn _mm512_permutex2var_epi32(a: __m512i, idx: __m512i, b: __m512i) ->
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex2var_epi32&expand=4235)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi32&expand=4235)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermt2d))]
@@ -20339,7 +20339,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex2var_epi32&expand=4237)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi32&expand=4237)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d
@@ -20356,7 +20356,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask2_permutex2var_epi32&expand=4236)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi32&expand=4236)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermi2d))]
@@ -20372,7 +20372,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi32(
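
The mask2 form is the one variant whose fallback is neither a nor src but the raw index bits, as the doc comment above states. An illustrative sketch (editorial):

#[cfg(target_arch = "x86_64")]
fn demo_mask2_permutex2var_epi32() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm512_set1_epi32(100);
        let b = _mm512_set1_epi32(200);
        let idx = _mm512_setr_epi32(0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 16, 17, 18, 19);
        // Low eight mask bits set: lanes 0..=7 are permuted, lanes 8..=15 keep idx.
        let r = _mm512_mask2_permutex2var_epi32(a, idx, 0x00ff, b);
        let out: [i32; 16] = std::mem::transmute(r);
        assert_eq!(out, [100, 100, 100, 100, 200, 200, 200, 200, 0, 1, 2, 3, 16, 17, 18, 19]);
    }
}
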
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_epi32&expand=4234)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi32&expand=4234)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d
@@ -20382,7 +20382,7 @@ pub unsafe fn _mm256_permutex2var_epi32(a: __m256i, idx: __m256i, b: __m256i) ->
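
The 256-bit and 128-bit forms carry target_feature(enable = "avx512f,avx512vl"), so runtime dispatch has to check both features. A sketch of that pattern (editorial):

#[cfg(target_arch = "x86_64")]
fn demo_permutex2var_epi32_vl() {
    if !(is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl")) {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
        let b = _mm256_setr_epi32(10, 11, 12, 13, 14, 15, 16, 17);
        // Eight lanes: bits 2:0 select the lane, bit 3 selects b over a.
        let idx = _mm256_setr_epi32(7, 8, 6, 9, 5, 10, 4, 11);
        let r = _mm256_permutex2var_epi32(a, idx, b);
        let out: [i32; 8] = std::mem::transmute(r);
        assert_eq!(out, [7, 10, 6, 11, 5, 12, 4, 13]);
    }
}
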
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex2var_epi32&expand=4231)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi32&expand=4231)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2d))]
@@ -20398,7 +20398,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex2var_epi32&expand=4233)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi32&expand=4233)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d
@@ -20415,7 +20415,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask2_permutex2var_epi32&expand=4232)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi32&expand=4232)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2d))]
@@ -20431,7 +20431,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_epi32&expand=4230)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi32&expand=4230)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d
@@ -20441,7 +20441,7 @@ pub unsafe fn _mm_permutex2var_epi32(a: __m128i, idx: __m128i, b: __m128i) -> __
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutex2var_epi32&expand=4227)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi32&expand=4227)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2d))]
@@ -20457,7 +20457,7 @@ pub unsafe fn _mm_mask_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutex2var_epi32&expand=4229)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi32&expand=4229)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2d or vpermt2d
@@ -20474,7 +20474,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi32(
/// Shuffle 32-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask2_permutex2var_epi32&expand=4228)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi32&expand=4228)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2d))]
@@ -20490,7 +20490,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi32(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_epi64&expand=4250)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi64&expand=4250)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q
@@ -20500,7 +20500,7 @@ pub unsafe fn _mm512_permutex2var_epi64(a: __m512i, idx: __m512i, b: __m512i) ->
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex2var_epi64&expand=4247)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi64&expand=4247)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermt2q))]
@@ -20516,7 +20516,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex2var_epi64&expand=4249)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi64&expand=4249)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q
@@ -20533,7 +20533,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask2_permutex2var_epi64&expand=4248)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi64&expand=4248)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermi2q))]
@@ -20549,7 +20549,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_epi64&expand=4246)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi64&expand=4246)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q
@@ -20559,7 +20559,7 @@ pub unsafe fn _mm256_permutex2var_epi64(a: __m256i, idx: __m256i, b: __m256i) ->
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex2var_epi64&expand=4243)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi64&expand=4243)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2q))]
@@ -20575,7 +20575,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex2var_epi64&expand=4245)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi64&expand=4245)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q
@@ -20592,7 +20592,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask2_permutex2var_epi64&expand=4244)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi64&expand=4244)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2q))]
@@ -20608,7 +20608,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_epi64&expand=4242)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi64&expand=4242)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q
@@ -20618,7 +20618,7 @@ pub unsafe fn _mm_permutex2var_epi64(a: __m128i, idx: __m128i, b: __m128i) -> __
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutex2var_epi64&expand=4239)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi64&expand=4239)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2q))]
@@ -20634,7 +20634,7 @@ pub unsafe fn _mm_mask_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutex2var_epi64&expand=4241)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi64&expand=4241)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2q or vpermt2q
@@ -20651,7 +20651,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi64(
/// Shuffle 64-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask2_permutex2var_epi64&expand=4240)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi64&expand=4240)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2q))]
@@ -20667,7 +20667,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi64(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ps&expand=4286)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_ps&expand=4286)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps
@@ -20677,7 +20677,7 @@ pub unsafe fn _mm512_permutex2var_ps(a: __m512, idx: __m512i, b: __m512) -> __m5
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex2var_ps&expand=4283)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_ps&expand=4283)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermt2ps))]
@@ -20693,7 +20693,7 @@ pub unsafe fn _mm512_mask_permutex2var_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex2var_ps&expand=4285)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_ps&expand=4285)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps
@@ -20710,7 +20710,7 @@ pub unsafe fn _mm512_maskz_permutex2var_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask2_permutex2var_ps&expand=4284)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_ps&expand=4284)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2ps, but it shows vpermt2ps
@@ -20727,7 +20727,7 @@ pub unsafe fn _mm512_mask2_permutex2var_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ps&expand=4282)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_ps&expand=4282)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps
@@ -20737,7 +20737,7 @@ pub unsafe fn _mm256_permutex2var_ps(a: __m256, idx: __m256i, b: __m256) -> __m2
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex2var_ps&expand=4279)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_ps&expand=4279)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2ps))]
@@ -20753,7 +20753,7 @@ pub unsafe fn _mm256_mask_permutex2var_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex2var_ps&expand=4281)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_ps&expand=4281)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps
@@ -20770,7 +20770,7 @@ pub unsafe fn _mm256_maskz_permutex2var_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask2_permutex2var_ps&expand=4280)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_ps&expand=4280)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2ps, but it shows vpermt2ps
@@ -20787,7 +20787,7 @@ pub unsafe fn _mm256_mask2_permutex2var_ps(
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ps&expand=4278)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_ps&expand=4278)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps
@@ -20797,7 +20797,7 @@ pub unsafe fn _mm_permutex2var_ps(a: __m128, idx: __m128i, b: __m128) -> __m128
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutex2var_ps&expand=4275)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_ps&expand=4275)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2ps))]
@@ -20808,7 +20808,7 @@ pub unsafe fn _mm_mask_permutex2var_ps(a: __m128, k: __mmask8, idx: __m128i, b:
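
A sketch of the writemask (merge) form at 128 bits, where lanes with a clear mask bit keep the corresponding element of a (editorial, not part of this diff):

#[cfg(target_arch = "x86_64")]
fn demo_mask_permutex2var_ps() {
    if !(is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl")) {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm_setr_ps(0.0, 1.0, 2.0, 3.0);
        let b = _mm_setr_ps(4.0, 5.0, 6.0, 7.0);
        // Bits 1:0 select the lane, bit 2 selects b; only lanes 0 and 1 are written.
        let idx = _mm_setr_epi32(4, 5, 6, 7);
        let r = _mm_mask_permutex2var_ps(a, 0b0011, idx, b);
        let out: [f32; 4] = std::mem::transmute(r);
        assert_eq!(out, [4.0, 5.0, 2.0, 3.0]);
    }
}
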
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutex2var_ps&expand=4277)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_ps&expand=4277)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2ps or vpermt2ps
@@ -20820,7 +20820,7 @@ pub unsafe fn _mm_maskz_permutex2var_ps(k: __mmask8, a: __m128, idx: __m128i, b:
/// Shuffle single-precision (32-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask2_permutex2var_ps&expand=4276)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_ps&expand=4276)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2ps, but it shows vpermt2ps
@@ -20832,7 +20832,7 @@ pub unsafe fn _mm_mask2_permutex2var_ps(a: __m128, idx: __m128i, k: __mmask8, b:
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_pd&expand=4274)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_pd&expand=4274)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd
@@ -20842,7 +20842,7 @@ pub unsafe fn _mm512_permutex2var_pd(a: __m512d, idx: __m512i, b: __m512d) -> __
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex2var_pd&expand=4271)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_pd&expand=4271)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermt2pd))]
@@ -20858,7 +20858,7 @@ pub unsafe fn _mm512_mask_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex2var_pd&expand=4273)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_pd&expand=4273)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd
@@ -20875,7 +20875,7 @@ pub unsafe fn _mm512_maskz_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask2_permutex2var_pd&expand=4272)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_pd&expand=4272)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2pd, but it shows vpermt2pd
@@ -20892,7 +20892,7 @@ pub unsafe fn _mm512_mask2_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_pd&expand=4270)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_pd&expand=4270)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd
@@ -20902,7 +20902,7 @@ pub unsafe fn _mm256_permutex2var_pd(a: __m256d, idx: __m256i, b: __m256d) -> __
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex2var_pd&expand=4267)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_pd&expand=4267)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2pd))]
@@ -20918,7 +20918,7 @@ pub unsafe fn _mm256_mask_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex2var_pd&expand=4269)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_pd&expand=4269)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd
@@ -20935,7 +20935,7 @@ pub unsafe fn _mm256_maskz_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask2_permutex2var_pd&expand=4268)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_pd&expand=4268)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2pd, but it shows vpermt2pd
@@ -20952,7 +20952,7 @@ pub unsafe fn _mm256_mask2_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_pd&expand=4266)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_pd&expand=4266)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd
@@ -20962,7 +20962,7 @@ pub unsafe fn _mm_permutex2var_pd(a: __m128d, idx: __m128i, b: __m128d) -> __m12
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutex2var_pd&expand=4263)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_pd&expand=4263)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2pd))]
@@ -20978,7 +20978,7 @@ pub unsafe fn _mm_mask_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutex2var_pd&expand=4265)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_pd&expand=4265)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //vpermi2pd or vpermt2pd
@@ -20995,7 +20995,7 @@ pub unsafe fn _mm_maskz_permutex2var_pd(
/// Shuffle double-precision (64-bit) floating-point elements in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from idx when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask2_permutex2var_pd&expand=4264)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_pd&expand=4264)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2pd, but it shows vpermt2pd
@@ -21012,17 +21012,17 @@ pub unsafe fn _mm_mask2_permutex2var_pd(
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_shuffle_epi32&expand=5150)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_epi32&expand=5150)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpermilps, MASK = 9))] //should be vpshufd
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_shuffle_epi32<const MASK: _MM_PERM_ENUM>(a: __m512i) -> __m512i {
- static_assert_imm8!(MASK);
- let r: i32x16 = simd_shuffle16!(
+ static_assert_uimm_bits!(MASK, 8);
+ let r: i32x16 = simd_shuffle!(
a.as_i32x16(),
a.as_i32x16(),
- <const MASK: _MM_PERM_ENUM> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
(MASK as u32 >> 4) & 0b11,
@@ -21046,7 +21046,7 @@ pub unsafe fn _mm512_shuffle_epi32<const MASK: _MM_PERM_ENUM>(a: __m512i) -> __m
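
The hunk above also shows the mechanical part of this change: static_assert_uimm_bits!(MASK, 8) replaces static_assert_imm8!(MASK) (the same compile-time guarantee, stated as "MASK fits in 8 unsigned bits"), and the length-generic simd_shuffle! replaces simd_shuffle16!, dropping the now-unneeded <const MASK: ...> capture list. Callers are unaffected; an illustrative sketch of the control encoding, two bits per destination element applied within each 128-bit lane (editorial):

#[cfg(target_arch = "x86_64")]
fn demo_shuffle_epi32() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        // 0b01_00_11_10 rotates every 128-bit lane by two elements.
        let r = _mm512_shuffle_epi32::<0b01_00_11_10>(a);
        let out: [i32; 16] = std::mem::transmute(r);
        assert_eq!(out, [2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13]);
    }
}
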
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_epi32&expand=5148)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_epi32&expand=5148)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
@@ -21056,14 +21056,14 @@ pub unsafe fn _mm512_mask_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_epi32::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16()))
}
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_epi32&expand=5149)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_epi32&expand=5149)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
@@ -21072,7 +21072,7 @@ pub unsafe fn _mm512_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
k: __mmask16,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_epi32::<MASK>(a);
let zero = _mm512_setzero_si512().as_i32x16();
transmute(simd_select_bitmask(k, r.as_i32x16(), zero))
@@ -21080,7 +21080,7 @@ pub unsafe fn _mm512_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_epi32&expand=5145)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_epi32&expand=5145)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
@@ -21090,14 +21090,14 @@ pub unsafe fn _mm256_mask_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_epi32::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8()))
}
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_epi32&expand=5146)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_epi32&expand=5146)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
@@ -21106,7 +21106,7 @@ pub unsafe fn _mm256_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
k: __mmask8,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_epi32::<MASK>(a);
let zero = _mm256_setzero_si256().as_i32x8();
transmute(simd_select_bitmask(k, r.as_i32x8(), zero))
@@ -21114,7 +21114,7 @@ pub unsafe fn _mm256_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shuffle_epi32&expand=5142)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_epi32&expand=5142)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
@@ -21124,14 +21124,14 @@ pub unsafe fn _mm_mask_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm_shuffle_epi32::<MASK>(a);
transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4()))
}
/// Shuffle 32-bit integers in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shuffle_epi32&expand=5143)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_epi32&expand=5143)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpshufd, MASK = 9))]
@@ -21140,7 +21140,7 @@ pub unsafe fn _mm_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
k: __mmask8,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm_shuffle_epi32::<MASK>(a);
let zero = _mm_setzero_si128().as_i32x4();
transmute(simd_select_bitmask(k, r.as_i32x4(), zero))
@@ -21148,17 +21148,17 @@ pub unsafe fn _mm_maskz_shuffle_epi32<const MASK: _MM_PERM_ENUM>(
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_ps&expand=5203)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_ps&expand=5203)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shuffle_ps<const MASK: i32>(a: __m512, b: __m512) -> __m512 {
- static_assert_imm8!(MASK);
- simd_shuffle16!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11) + 16,
@@ -21181,7 +21181,7 @@ pub unsafe fn _mm512_shuffle_ps<const MASK: i32>(a: __m512, b: __m512) -> __m512
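
For the two-input float shuffle the control splits per 128-bit lane: the low two destination elements come from a and the high two from b, each selected by two bits of MASK (visible in the index list above, where +16 offsets address b). A sketch (editorial):

#[cfg(target_arch = "x86_64")]
fn demo_shuffle_ps() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm512_set1_ps(1.0);
        let b = _mm512_set1_ps(2.0);
        // Per lane: e0 = a[0], e1 = a[1], e2 = b[2], e3 = b[3].
        let r = _mm512_shuffle_ps::<0b11_10_01_00>(a, b);
        let out: [f32; 16] = std::mem::transmute(r);
        assert_eq!(out[..4], [1.0, 1.0, 2.0, 2.0]);
    }
}
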
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_ps&expand=5201)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_ps&expand=5201)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
@@ -21192,14 +21192,14 @@ pub unsafe fn _mm512_mask_shuffle_ps<const MASK: i32>(
a: __m512,
b: __m512,
) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_ps::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16()))
}
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_ps&expand=5202)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_ps&expand=5202)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
@@ -21209,7 +21209,7 @@ pub unsafe fn _mm512_maskz_shuffle_ps<const MASK: i32>(
a: __m512,
b: __m512,
) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_ps::<MASK>(a, b);
let zero = _mm512_setzero_ps().as_f32x16();
transmute(simd_select_bitmask(k, r.as_f32x16(), zero))
@@ -21217,7 +21217,7 @@ pub unsafe fn _mm512_maskz_shuffle_ps<const MASK: i32>(
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_ps&expand=5198)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_ps&expand=5198)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
@@ -21228,14 +21228,14 @@ pub unsafe fn _mm256_mask_shuffle_ps<const MASK: i32>(
a: __m256,
b: __m256,
) -> __m256 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_ps::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8()))
}
/// Shuffle single-precision (32-bit) floating-point elements in a within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_ps&expand=5199)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_ps&expand=5199)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
@@ -21245,7 +21245,7 @@ pub unsafe fn _mm256_maskz_shuffle_ps<const MASK: i32>(
a: __m256,
b: __m256,
) -> __m256 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_ps::<MASK>(a, b);
let zero = _mm256_setzero_ps().as_f32x8();
transmute(simd_select_bitmask(k, r.as_f32x8(), zero))
@@ -21253,7 +21253,7 @@ pub unsafe fn _mm256_maskz_shuffle_ps<const MASK: i32>(
/// Shuffle single-precision (32-bit) floating-point elements in a using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shuffle_ps&expand=5195)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_ps&expand=5195)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
@@ -21264,20 +21264,20 @@ pub unsafe fn _mm_mask_shuffle_ps<const MASK: i32>(
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm_shuffle_ps::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4()))
}
/// Shuffle single-precision (32-bit) floating-point elements in a using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shuffle_ps&expand=5196)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_ps&expand=5196)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufps, MASK = 3))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_maskz_shuffle_ps<const MASK: i32>(k: __mmask8, a: __m128, b: __m128) -> __m128 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm_shuffle_ps::<MASK>(a, b);
let zero = _mm_setzero_ps().as_f32x4();
transmute(simd_select_bitmask(k, r.as_f32x4(), zero))
@@ -21285,17 +21285,17 @@ pub unsafe fn _mm_maskz_shuffle_ps<const MASK: i32>(k: __mmask8, a: __m128, b: _
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_pd&expand=5192)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_pd&expand=5192)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shuffle_pd<const MASK: i32>(a: __m512d, b: __m512d) -> __m512d {
- static_assert_imm8!(MASK);
- simd_shuffle8!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
MASK as u32 & 0b1,
((MASK as u32 >> 1) & 0b1) + 8,
((MASK as u32 >> 2) & 0b1) + 2,
@@ -21310,7 +21310,7 @@ pub unsafe fn _mm512_shuffle_pd<const MASK: i32>(a: __m512d, b: __m512d) -> __m5
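
shuffle_pd uses one control bit per element: within each 128-bit lane, destination element 0 is drawn from a and element 1 from b, the bit choosing which half of that lane to take (the +8 offsets in the index list above address b). Sketch (editorial):

#[cfg(target_arch = "x86_64")]
fn demo_shuffle_pd() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm512_setr_pd(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
        let b = _mm512_setr_pd(10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0);
        // All control bits set: take the high element of every pair from each input.
        let r = _mm512_shuffle_pd::<0b1111_1111>(a, b);
        let out: [f64; 8] = std::mem::transmute(r);
        assert_eq!(out, [1.0, 11.0, 3.0, 13.0, 5.0, 15.0, 7.0, 17.0]);
    }
}
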
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_pd&expand=5190)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_pd&expand=5190)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
@@ -21321,14 +21321,14 @@ pub unsafe fn _mm512_mask_shuffle_pd<const MASK: i32>(
a: __m512d,
b: __m512d,
) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_pd::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8()))
}
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_pd&expand=5191)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_pd&expand=5191)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
@@ -21338,7 +21338,7 @@ pub unsafe fn _mm512_maskz_shuffle_pd<const MASK: i32>(
a: __m512d,
b: __m512d,
) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_pd::<MASK>(a, b);
let zero = _mm512_setzero_pd().as_f64x8();
transmute(simd_select_bitmask(k, r.as_f64x8(), zero))
@@ -21346,7 +21346,7 @@ pub unsafe fn _mm512_maskz_shuffle_pd<const MASK: i32>(
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_pd&expand=5187)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_pd&expand=5187)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
@@ -21357,14 +21357,14 @@ pub unsafe fn _mm256_mask_shuffle_pd<const MASK: i32>(
a: __m256d,
b: __m256d,
) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_pd::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4()))
}
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_pd&expand=5188)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_pd&expand=5188)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 3))]
@@ -21374,7 +21374,7 @@ pub unsafe fn _mm256_maskz_shuffle_pd<const MASK: i32>(
a: __m256d,
b: __m256d,
) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_pd::<MASK>(a, b);
let zero = _mm256_setzero_pd().as_f64x4();
transmute(simd_select_bitmask(k, r.as_f64x4(), zero))
@@ -21382,7 +21382,7 @@ pub unsafe fn _mm256_maskz_shuffle_pd<const MASK: i32>(
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shuffle_pd&expand=5184)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shuffle_pd&expand=5184)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 1))]
@@ -21393,14 +21393,14 @@ pub unsafe fn _mm_mask_shuffle_pd<const MASK: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm_shuffle_pd::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f64x2(), src.as_f64x2()))
}
/// Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shuffle_pd&expand=5185)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shuffle_pd&expand=5185)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufpd, MASK = 1))]
@@ -21410,7 +21410,7 @@ pub unsafe fn _mm_maskz_shuffle_pd<const MASK: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm_shuffle_pd::<MASK>(a, b);
let zero = _mm_setzero_pd().as_f64x2();
transmute(simd_select_bitmask(k, r.as_f64x2(), zero))
@@ -21418,19 +21418,19 @@ pub unsafe fn _mm_maskz_shuffle_pd<const MASK: i32>(
/// Shuffle 128 bits (composed of 4 32-bit integers) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_i32&expand=5177)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_i32x4&expand=5177)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_01_01_01))] //should be vshufi32x4
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shuffle_i32x4<const MASK: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_i32x16();
let b = b.as_i32x16();
- let r: i32x16 = simd_shuffle16!(
+ let r: i32x16 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b11) * 4 + 0,
(MASK as u32 & 0b11) * 4 + 1,
(MASK as u32 & 0b11) * 4 + 2,
@@ -21454,7 +21454,7 @@ pub unsafe fn _mm512_shuffle_i32x4<const MASK: i32>(a: __m512i, b: __m512i) -> _
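
Unlike the element-level shuffles above, shuffle_i32x4 moves whole 128-bit chunks: two control bits per destination chunk, the low two chunks sourced from a and the high two from b. Sketch (editorial, same availability caveat as earlier examples):

#[cfg(target_arch = "x86_64")]
fn demo_shuffle_i32x4() {
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        use std::arch::x86_64::*;
        let a = _mm512_setr_epi32(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3);
        let b = _mm512_setr_epi32(10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13);
        // dst = [a.chunk2, a.chunk3, b.chunk0, b.chunk1]
        let r = _mm512_shuffle_i32x4::<0b01_00_11_10>(a, b);
        let out: [i32; 16] = std::mem::transmute(r);
        assert_eq!(out, [2, 2, 2, 2, 3, 3, 3, 3, 10, 10, 10, 10, 11, 11, 11, 11]);
    }
}
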
/// Shuffle 128 bits (composed of 4 32-bit integers) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_i32x&expand=5175)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_i32x4&expand=5175)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))]
@@ -21465,14 +21465,14 @@ pub unsafe fn _mm512_mask_shuffle_i32x4<const MASK: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_i32x4::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16()))
}
/// Shuffle 128 bits (composed of 4 32-bit integers) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_i32&expand=5176)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_i32x4&expand=5176)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b10_11_01_01))]
@@ -21482,7 +21482,7 @@ pub unsafe fn _mm512_maskz_shuffle_i32x4<const MASK: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_i32x4::<MASK>(a, b);
let zero = _mm512_setzero_si512().as_i32x16();
transmute(simd_select_bitmask(k, r.as_i32x16(), zero))
@@ -21490,19 +21490,19 @@ pub unsafe fn _mm512_maskz_shuffle_i32x4<const MASK: i32>(
/// Shuffle 128 bits (composed of 4 32-bit integers) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_i32x4&expand=5174)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_i32x4&expand=5174)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b11))] //should be vshufi32x4
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shuffle_i32x4<const MASK: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_i32x8();
let b = b.as_i32x8();
- let r: i32x8 = simd_shuffle8!(
+ let r: i32x8 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b1) * 4 + 0,
(MASK as u32 & 0b1) * 4 + 1,
(MASK as u32 & 0b1) * 4 + 2,
@@ -21518,7 +21518,7 @@ pub unsafe fn _mm256_shuffle_i32x4<const MASK: i32>(a: __m256i, b: __m256i) -> _
/// Shuffle 128 bits (composed of 4 32-bit integers) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_i32x4&expand=5172)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_i32x4&expand=5172)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))]
@@ -21529,14 +21529,14 @@ pub unsafe fn _mm256_mask_shuffle_i32x4<const MASK: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_i32x4::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8()))
}
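
Every _mask_/_maskz_ pair in this file follows the same shape: compute the full shuffle, then blend per element under k via simd_select_bitmask. A scalar sketch of that selection, under the assumption that bit i of k picks the shuffled element and the fallback is src (writemask) or zero (zeromask):

fn select_bitmask8(k: u8, shuffled: [i32; 8], fallback: [i32; 8]) -> [i32; 8] {
    // Writemask: fallback = src lanes; zeromask: fallback = [0; 8].
    std::array::from_fn(|i| if (k >> i) & 1 == 1 { shuffled[i] } else { fallback[i] })
}
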
/// Shuffle 128-bits (composed of 4 32-bit integers) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_i32x4&expand=5173)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_i32x4&expand=5173)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufi32x4, MASK = 0b11))]
@@ -21546,7 +21546,7 @@ pub unsafe fn _mm256_maskz_shuffle_i32x4<const MASK: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_i32x4::<MASK>(a, b);
let zero = _mm256_setzero_si256().as_i32x8();
transmute(simd_select_bitmask(k, r.as_i32x8(), zero))
@@ -21554,19 +21554,19 @@ pub unsafe fn _mm256_maskz_shuffle_i32x4<const MASK: i32>(
/// Shuffle 128-bits (composed of 2 64-bit integers) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_i64x2&expand=5183)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_i64x2&expand=5183)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shuffle_i64x2<const MASK: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_i64x8();
let b = b.as_i64x8();
- let r: i64x8 = simd_shuffle8!(
+ let r: i64x8 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b11) * 2 + 0,
(MASK as u32 & 0b11) * 2 + 1,
((MASK as u32 >> 2) & 0b11) * 2 + 0,
@@ -21582,7 +21582,7 @@ pub unsafe fn _mm512_shuffle_i64x2<const MASK: i32>(a: __m512i, b: __m512i) -> _
/// Shuffle 128-bits (composed of 2 64-bit integers) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_i64x&expand=5181)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_i64x2&expand=5181)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))]
@@ -21593,14 +21593,14 @@ pub unsafe fn _mm512_mask_shuffle_i64x2<const MASK: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_i64x2::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8()))
}
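
A minimal usage sketch for the i64x2 family, mirroring the index arithmetic above (hypothetical test code, assuming AVX-512F is available at runtime):

#[target_feature(enable = "avx512f")]
unsafe fn shuffle_i64x2_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let b = _mm512_setr_epi64(10, 11, 12, 13, 14, 15, 16, 17);
    // MASK = 0b11_10_01_00: result lanes 0..1 come from a (lane indices 0, 1),
    // result lanes 2..3 come from b (lane indices 2, 3).
    let r = _mm512_shuffle_i64x2::<0b11_10_01_00>(a, b);
    let out: [i64; 8] = core::mem::transmute(r);
    assert_eq!(out, [0, 1, 2, 3, 14, 15, 16, 17]);
}
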
/// Shuffle 128-bits (composed of 2 64-bit integers) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_i64&expand=5182)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_i64x2&expand=5182)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b10_11_11_11))]
@@ -21610,7 +21610,7 @@ pub unsafe fn _mm512_maskz_shuffle_i64x2<const MASK: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_i64x2::<MASK>(a, b);
let zero = _mm512_setzero_si512().as_i64x8();
transmute(simd_select_bitmask(k, r.as_i64x8(), zero))
@@ -21618,19 +21618,19 @@ pub unsafe fn _mm512_maskz_shuffle_i64x2<const MASK: i32>(
/// Shuffle 128-bits (composed of 2 64-bit integers) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_i64x2&expand=5180)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_i64x2&expand=5180)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] // should be vshufi64x2
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shuffle_i64x2<const MASK: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_i64x4();
let b = b.as_i64x4();
- let r: i64x4 = simd_shuffle4!(
+ let r: i64x4 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b1) * 2 + 0,
(MASK as u32 & 0b1) * 2 + 1,
((MASK as u32 >> 1) & 0b1) * 2 + 0 + 4,
@@ -21642,7 +21642,7 @@ pub unsafe fn _mm256_shuffle_i64x2<const MASK: i32>(a: __m256i, b: __m256i) -> _
/// Shuffle 128-bits (composed of 2 64-bit integers) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_i64x2&expand=5178)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_i64x2&expand=5178)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))]
@@ -21653,14 +21653,14 @@ pub unsafe fn _mm256_mask_shuffle_i64x2<const MASK: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_i64x2::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4()))
}
/// Shuffle 128-bits (composed of 2 64-bit integers) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_i64x2&expand=5179)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_i64x2&expand=5179)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshufi64x2, MASK = 0b11))]
@@ -21670,7 +21670,7 @@ pub unsafe fn _mm256_maskz_shuffle_i64x2<const MASK: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_i64x2::<MASK>(a, b);
let zero = _mm256_setzero_si256().as_i64x4();
transmute(simd_select_bitmask(k, r.as_i64x4(), zero))
@@ -21678,19 +21678,19 @@ pub unsafe fn _mm256_maskz_shuffle_i64x2<const MASK: i32>(
/// Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_f32x4&expand=5165)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_f32x4&expand=5165)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b1011))] // should be vshuff32x4, but generates vshuff64x2
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shuffle_f32x4<const MASK: i32>(a: __m512, b: __m512) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_f32x16();
let b = b.as_f32x16();
- let r: f32x16 = simd_shuffle16!(
+ let r: f32x16 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b11) * 4 + 0,
(MASK as u32 & 0b11) * 4 + 1,
(MASK as u32 & 0b11) * 4 + 2,
@@ -21714,7 +21714,7 @@ pub unsafe fn _mm512_shuffle_f32x4<const MASK: i32>(a: __m512, b: __m512) -> __m
/// Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_f32&expand=5163)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_f32x4&expand=5163)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))]
@@ -21725,14 +21725,14 @@ pub unsafe fn _mm512_mask_shuffle_f32x4<const MASK: i32>(
a: __m512,
b: __m512,
) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_f32x4::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16()))
}
/// Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_f32&expand=5164)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_f32x4&expand=5164)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b1011))]
@@ -21742,7 +21742,7 @@ pub unsafe fn _mm512_maskz_shuffle_f32x4<const MASK: i32>(
a: __m512,
b: __m512,
) -> __m512 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_f32x4::<MASK>(a, b);
let zero = _mm512_setzero_ps().as_f32x16();
transmute(simd_select_bitmask(k, r.as_f32x16(), zero))
@@ -21750,19 +21750,19 @@ pub unsafe fn _mm512_maskz_shuffle_f32x4<const MASK: i32>(
/// Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_f32x4&expand=5162)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_f32x4&expand=5162)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] // should be vshuff32x4
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shuffle_f32x4<const MASK: i32>(a: __m256, b: __m256) -> __m256 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_f32x8();
let b = b.as_f32x8();
- let r: f32x8 = simd_shuffle8!(
+ let r: f32x8 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b1) * 4 + 0,
(MASK as u32 & 0b1) * 4 + 1,
(MASK as u32 & 0b1) * 4 + 2,
@@ -21778,7 +21778,7 @@ pub unsafe fn _mm256_shuffle_f32x4<const MASK: i32>(a: __m256, b: __m256) -> __m
/// Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_f32x4&expand=5160)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_f32x4&expand=5160)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))]
@@ -21789,14 +21789,14 @@ pub unsafe fn _mm256_mask_shuffle_f32x4<const MASK: i32>(
a: __m256,
b: __m256,
) -> __m256 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_f32x4::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8()))
}
/// Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_f32x4&expand=5161)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_f32x4&expand=5161)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshuff32x4, MASK = 0b11))]
@@ -21806,7 +21806,7 @@ pub unsafe fn _mm256_maskz_shuffle_f32x4<const MASK: i32>(
a: __m256,
b: __m256,
) -> __m256 {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_f32x4::<MASK>(a, b);
let zero = _mm256_setzero_ps().as_f32x8();
transmute(simd_select_bitmask(k, r.as_f32x8(), zero))
@@ -21814,19 +21814,19 @@ pub unsafe fn _mm256_maskz_shuffle_f32x4<const MASK: i32>(
/// Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shuffle_f64x2&expand=5171)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shuffle_f64x2&expand=5171)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shuffle_f64x2<const MASK: i32>(a: __m512d, b: __m512d) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_f64x8();
let b = b.as_f64x8();
- let r: f64x8 = simd_shuffle8!(
+ let r: f64x8 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b11) * 2 + 0,
(MASK as u32 & 0b11) * 2 + 1,
((MASK as u32 >> 2) & 0b11) * 2 + 0,
@@ -21842,7 +21842,7 @@ pub unsafe fn _mm512_shuffle_f64x2<const MASK: i32>(a: __m512d, b: __m512d) -> _
/// Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shuffle_f64x2&expand=5169)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shuffle_f64x2&expand=5169)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))]
@@ -21853,14 +21853,14 @@ pub unsafe fn _mm512_mask_shuffle_f64x2<const MASK: i32>(
a: __m512d,
b: __m512d,
) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_f64x2::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8()))
}
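
On the assert rename running through this patch: static_assert_imm8!(MASK) becomes static_assert_uimm_bits!(MASK, 8), one macro parameterized by bit width instead of a separate macro per width. A hedged sketch of the idea (not the stdarch macro itself):

// A const fn that panics during const evaluation when the immediate
// does not fit in the requested number of unsigned bits.
const fn assert_uimm_bits(imm: i32, bits: u32) {
    assert!(imm >= 0 && (imm as u64) < (1u64 << bits), "immediate out of range");
}
const _: () = assert_uimm_bits(0b10_11_11_11, 8); // ok
// const _: () = assert_uimm_bits(0x1FF, 8);      // fails to compile
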
/// Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shuffle_f64x2&expand=5170)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shuffle_f64x2&expand=5170)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b10_11_11_11))]
@@ -21870,7 +21870,7 @@ pub unsafe fn _mm512_maskz_shuffle_f64x2<const MASK: i32>(
a: __m512d,
b: __m512d,
) -> __m512d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm512_shuffle_f64x2::<MASK>(a, b);
let zero = _mm512_setzero_pd().as_f64x8();
transmute(simd_select_bitmask(k, r.as_f64x8(), zero))
@@ -21878,19 +21878,19 @@ pub unsafe fn _mm512_maskz_shuffle_f64x2<const MASK: i32>(
/// Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_f64x2&expand=5168)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shuffle_f64x2&expand=5168)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vperm, MASK = 0b01))] // should be vshuff64x2
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shuffle_f64x2<const MASK: i32>(a: __m256d, b: __m256d) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let a = a.as_f64x4();
let b = b.as_f64x4();
- let r: f64x4 = simd_shuffle4!(
+ let r: f64x4 = simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
(MASK as u32 & 0b1) * 2 + 0,
(MASK as u32 & 0b1) * 2 + 1,
((MASK as u32 >> 1) & 0b1) * 2 + 0 + 4,
@@ -21902,7 +21902,7 @@ pub unsafe fn _mm256_shuffle_f64x2<const MASK: i32>(a: __m256d, b: __m256d) -> _
/// Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shuffle_f64x2&expand=5166)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shuffle_f64x2&expand=5166)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))]
@@ -21913,14 +21913,14 @@ pub unsafe fn _mm256_mask_shuffle_f64x2<const MASK: i32>(
a: __m256d,
b: __m256d,
) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_f64x2::<MASK>(a, b);
transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4()))
}
/// Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by imm8 from a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shuffle_f64x2&expand=5167)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shuffle_f64x2&expand=5167)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vshuff64x2, MASK = 0b11))]
@@ -21930,7 +21930,7 @@ pub unsafe fn _mm256_maskz_shuffle_f64x2<const MASK: i32>(
a: __m256d,
b: __m256d,
) -> __m256d {
- static_assert_imm8!(MASK);
+ static_assert_uimm_bits!(MASK, 8);
let r = _mm256_shuffle_f64x2::<MASK>(a, b);
let zero = _mm256_setzero_pd().as_f64x4();
transmute(simd_select_bitmask(k, r.as_f64x4(), zero))
@@ -21938,7 +21938,7 @@ pub unsafe fn _mm256_maskz_shuffle_f64x2<const MASK: i32>(
/// Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from a, selected with imm8, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_extractf32x4_ps&expand=2442)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extractf32x4_ps&expand=2442)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -21947,18 +21947,18 @@ pub unsafe fn _mm256_maskz_shuffle_f64x2<const MASK: i32>(
)]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_extractf32x4_ps<const IMM8: i32>(a: __m512) -> __m128 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
match IMM8 & 0x3 {
- 0 => simd_shuffle4!(a, _mm512_undefined_ps(), [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, _mm512_undefined_ps(), [4, 5, 6, 7]),
- 2 => simd_shuffle4!(a, _mm512_undefined_ps(), [8, 9, 10, 11]),
- _ => simd_shuffle4!(a, _mm512_undefined_ps(), [12, 13, 14, 15]),
+ 0 => simd_shuffle!(a, _mm512_undefined_ps(), [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, _mm512_undefined_ps(), [4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, _mm512_undefined_ps(), [8, 9, 10, 11]),
+ _ => simd_shuffle!(a, _mm512_undefined_ps(), [12, 13, 14, 15]),
}
}
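
Usage sketch for the extract family (illustrative only): IMM8 names which of the four 128-bit lanes of the 512-bit source comes out.

#[target_feature(enable = "avx512f")]
unsafe fn extractf32x4_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_setr_ps(
        0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
        8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
    );
    let lane2: [f32; 4] = core::mem::transmute(_mm512_extractf32x4_ps::<2>(a));
    assert_eq!(lane2, [8.0, 9.0, 10.0, 11.0]);
}
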
/// Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from a, selected with imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_extractf32x4_ps&expand=2443)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extractf32x4_ps&expand=2443)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -21971,14 +21971,14 @@ pub unsafe fn _mm512_mask_extractf32x4_ps<const IMM8: i32>(
k: __mmask8,
a: __m512,
) -> __m128 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let r = _mm512_extractf32x4_ps::<IMM8>(a);
transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4()))
}
/// Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from a, selected with imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_extractf32x4_ps&expand=2444)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extractf32x4_ps&expand=2444)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -21987,7 +21987,7 @@ pub unsafe fn _mm512_mask_extractf32x4_ps<const IMM8: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m512) -> __m128 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let r = _mm512_extractf32x4_ps::<IMM8>(a);
let zero = _mm_setzero_ps().as_f32x4();
transmute(simd_select_bitmask(k, r.as_f32x4(), zero))
@@ -21995,7 +21995,7 @@ pub unsafe fn _mm512_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m5
/// Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from a, selected with imm8, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf32x4_ps&expand=2439)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extractf32x4_ps&expand=2439)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22004,16 +22004,16 @@ pub unsafe fn _mm512_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m5
)]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_extractf32x4_ps<const IMM8: i32>(a: __m256) -> __m128 {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
match IMM8 & 0x1 {
- 0 => simd_shuffle4!(a, _mm256_undefined_ps(), [0, 1, 2, 3]),
- _ => simd_shuffle4!(a, _mm256_undefined_ps(), [4, 5, 6, 7]),
+ 0 => simd_shuffle!(a, _mm256_undefined_ps(), [0, 1, 2, 3]),
+ _ => simd_shuffle!(a, _mm256_undefined_ps(), [4, 5, 6, 7]),
}
}
/// Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from a, selected with imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_extractf32x4_ps&expand=2440)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_extractf32x4_ps&expand=2440)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22026,14 +22026,14 @@ pub unsafe fn _mm256_mask_extractf32x4_ps<const IMM8: i32>(
k: __mmask8,
a: __m256,
) -> __m128 {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm256_extractf32x4_ps::<IMM8>(a);
transmute(simd_select_bitmask(k, r.as_f32x4(), src.as_f32x4()))
}
/// Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from a, selected with imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_extractf32x4_ps&expand=2441)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_extractf32x4_ps&expand=2441)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22042,7 +22042,7 @@ pub unsafe fn _mm256_mask_extractf32x4_ps<const IMM8: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m256) -> __m128 {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm256_extractf32x4_ps::<IMM8>(a);
let zero = _mm_setzero_ps().as_f32x4();
transmute(simd_select_bitmask(k, r.as_f32x4(), zero))
@@ -22050,7 +22050,7 @@ pub unsafe fn _mm256_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m2
/// Extract 256 bits (composed of 4 packed 64-bit integers) from a, selected with IMM1, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_extracti64x4_epi64&expand=2473)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extracti64x4_epi64&expand=2473)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22059,16 +22059,16 @@ pub unsafe fn _mm256_maskz_extractf32x4_ps<const IMM8: i32>(k: __mmask8, a: __m2
)]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_extracti64x4_epi64<const IMM1: i32>(a: __m512i) -> __m256i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
match IMM1 {
- 0 => simd_shuffle4!(a, _mm512_set1_epi64(0), [0, 1, 2, 3]),
- _ => simd_shuffle4!(a, _mm512_set1_epi64(0), [4, 5, 6, 7]),
+ 0 => simd_shuffle!(a, _mm512_set1_epi64(0), [0, 1, 2, 3]),
+ _ => simd_shuffle!(a, _mm512_set1_epi64(0), [4, 5, 6, 7]),
}
}
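
Same pattern for the 256-bit integer extract; note that the second shuffle operand here is a zeroed vector rather than an undefined one, although none of its lanes can be selected. Illustrative usage:

#[target_feature(enable = "avx512f")]
unsafe fn extracti64x4_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let hi: [i64; 4] = core::mem::transmute(_mm512_extracti64x4_epi64::<1>(a));
    assert_eq!(hi, [4, 5, 6, 7]); // upper 256 bits of a
}
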
/// Extract 256 bits (composed of 4 packed 64-bit integers) from a, selected with IMM1, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_extracti64x4_epi64&expand=2474)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extracti64x4_epi64&expand=2474)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22081,14 +22081,14 @@ pub unsafe fn _mm512_mask_extracti64x4_epi64<const IMM1: i32>(
k: __mmask8,
a: __m512i,
) -> __m256i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let r = _mm512_extracti64x4_epi64::<IMM1>(a);
transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4()))
}
/// Extract 256 bits (composed of 4 packed 64-bit integers) from a, selected with IMM1, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_extracti64x4_epi64&expand=2475)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extracti64x4_epi64&expand=2475)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22097,7 +22097,7 @@ pub unsafe fn _mm512_mask_extracti64x4_epi64<const IMM1: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_extracti64x4_epi64<const IMM1: i32>(k: __mmask8, a: __m512i) -> __m256i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let r = _mm512_extracti64x4_epi64::<IMM1>(a);
let zero = _mm256_setzero_si256().as_i64x4();
transmute(simd_select_bitmask(k, r.as_i64x4(), zero))
@@ -22105,7 +22105,7 @@ pub unsafe fn _mm512_maskz_extracti64x4_epi64<const IMM1: i32>(k: __mmask8, a: _
/// Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from a, selected with imm8, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_extractf64x4_pd&expand=2454)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extractf64x4_pd&expand=2454)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22114,16 +22114,16 @@ pub unsafe fn _mm512_maskz_extracti64x4_epi64<const IMM1: i32>(k: __mmask8, a: _
)]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_extractf64x4_pd<const IMM8: i32>(a: __m512d) -> __m256d {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
match IMM8 & 0x1 {
- 0 => simd_shuffle4!(a, _mm512_undefined_pd(), [0, 1, 2, 3]),
- _ => simd_shuffle4!(a, _mm512_undefined_pd(), [4, 5, 6, 7]),
+ 0 => simd_shuffle!(a, _mm512_undefined_pd(), [0, 1, 2, 3]),
+ _ => simd_shuffle!(a, _mm512_undefined_pd(), [4, 5, 6, 7]),
}
}
/// Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from a, selected with imm8, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_extractf64x4_pd&expand=2455)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extractf64x4_pd&expand=2455)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22136,14 +22136,14 @@ pub unsafe fn _mm512_mask_extractf64x4_pd<const IMM8: i32>(
k: __mmask8,
a: __m512d,
) -> __m256d {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm512_extractf64x4_pd::<IMM8>(a);
transmute(simd_select_bitmask(k, r.as_f64x4(), src.as_f64x4()))
}
/// Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from a, selected with imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_extractf64x4_pd&expand=2456)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extractf64x4_pd&expand=2456)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22152,7 +22152,7 @@ pub unsafe fn _mm512_mask_extractf64x4_pd<const IMM8: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_extractf64x4_pd<const IMM8: i32>(k: __mmask8, a: __m512d) -> __m256d {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm512_extractf64x4_pd::<IMM8>(a);
let zero = _mm256_setzero_pd().as_f64x4();
transmute(simd_select_bitmask(k, r.as_f64x4(), zero))
@@ -22160,7 +22160,7 @@ pub unsafe fn _mm512_maskz_extractf64x4_pd<const IMM8: i32>(k: __mmask8, a: __m5
/// Extract 128 bits (composed of 4 packed 32-bit integers) from a, selected with IMM2, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_extracti32x4_epi32&expand=2461)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_extracti32x4_epi32&expand=2461)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22169,21 +22169,21 @@ pub unsafe fn _mm512_maskz_extractf64x4_pd<const IMM8: i32>(k: __mmask8, a: __m5
)]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm512_extracti32x4_epi32<const IMM2: i32>(a: __m512i) -> __m128i {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
let a = a.as_i32x16();
let undefined = _mm512_undefined_epi32().as_i32x16();
let extract: i32x4 = match IMM2 {
- 0 => simd_shuffle4!(a, undefined, [0, 1, 2, 3]),
- 1 => simd_shuffle4!(a, undefined, [4, 5, 6, 7]),
- 2 => simd_shuffle4!(a, undefined, [8, 9, 10, 11]),
- _ => simd_shuffle4!(a, undefined, [12, 13, 14, 15]),
+ 0 => simd_shuffle!(a, undefined, [0, 1, 2, 3]),
+ 1 => simd_shuffle!(a, undefined, [4, 5, 6, 7]),
+ 2 => simd_shuffle!(a, undefined, [8, 9, 10, 11]),
+ _ => simd_shuffle!(a, undefined, [12, 13, 14, 15]),
};
transmute(extract)
}
/// Extract 128 bits (composed of 4 packed 32-bit integers) from a, selected with IMM2, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_extracti32x4_epi32&expand=2462)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_extracti32x4_epi32&expand=2462)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22196,14 +22196,14 @@ pub unsafe fn _mm512_mask_extracti32x4_epi32<const IMM2: i32>(
k: __mmask8,
a: __m512i,
) -> __m128i {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
let r = _mm512_extracti32x4_epi32::<IMM2>(a);
transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4()))
}
/// Extract 128 bits (composed of 4 packed 32-bit integers) from a, selected with IMM2, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_extracti32x4_epi32&expand=2463)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_extracti32x4_epi32&expand=2463)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(
@@ -22212,7 +22212,7 @@ pub unsafe fn _mm512_mask_extracti32x4_epi32<const IMM2: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_maskz_extracti32x4_epi32<const IMM2: i32>(k: __mmask8, a: __m512i) -> __m128i {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
let r = _mm512_extracti32x4_epi32::<IMM2>(a);
let zero = _mm_setzero_si128().as_i32x4();
transmute(simd_select_bitmask(k, r.as_i32x4(), zero))
@@ -22220,7 +22220,7 @@ pub unsafe fn _mm512_maskz_extracti32x4_epi32<const IMM2: i32>(k: __mmask8, a: _
/// Extract 128 bits (composed of 4 packed 32-bit integers) from a, selected with IMM1, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extracti32x4_epi32&expand=2458)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extracti32x4_epi32&expand=2458)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22229,19 +22229,19 @@ pub unsafe fn _mm512_maskz_extracti32x4_epi32<const IMM2: i32>(k: __mmask8, a: _
)]
#[rustc_legacy_const_generics(1)]
pub unsafe fn _mm256_extracti32x4_epi32<const IMM1: i32>(a: __m256i) -> __m128i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let a = a.as_i32x8();
let undefined = _mm256_undefined_si256().as_i32x8();
let extract: i32x4 = match IMM1 {
- 0 => simd_shuffle4!(a, undefined, [0, 1, 2, 3]),
- _ => simd_shuffle4!(a, undefined, [4, 5, 6, 7]),
+ 0 => simd_shuffle!(a, undefined, [0, 1, 2, 3]),
+ _ => simd_shuffle!(a, undefined, [4, 5, 6, 7]),
};
transmute(extract)
}
/// Extract 128 bits (composed of 4 packed 32-bit integers) from a, selected with IMM1, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_extracti32x4_epi32&expand=2459)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_extracti32x4_epi32&expand=2459)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22254,14 +22254,14 @@ pub unsafe fn _mm256_mask_extracti32x4_epi32<const IMM1: i32>(
k: __mmask8,
a: __m256i,
) -> __m128i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let r = _mm256_extracti32x4_epi32::<IMM1>(a);
transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4()))
}
/// Extract 128 bits (composed of 4 packed 32-bit integers) from a, selected with IMM1, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_extracti32x4_epi32&expand=2460)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_extracti32x4_epi32&expand=2460)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22270,7 +22270,7 @@ pub unsafe fn _mm256_mask_extracti32x4_epi32<const IMM1: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_maskz_extracti32x4_epi32<const IMM1: i32>(k: __mmask8, a: __m256i) -> __m128i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
let r = _mm256_extracti32x4_epi32::<IMM1>(a);
let zero = _mm_setzero_si128().as_i32x4();
transmute(simd_select_bitmask(k, r.as_i32x4(), zero))
@@ -22278,41 +22278,41 @@ pub unsafe fn _mm256_maskz_extracti32x4_epi32<const IMM1: i32>(k: __mmask8, a: _
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_moveldup_ps&expand=3862)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_moveldup_ps&expand=3862)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovsldup))]
pub unsafe fn _mm512_moveldup_ps(a: __m512) -> __m512 {
- let r: f32x16 = simd_shuffle16!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]);
+ let r: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]);
transmute(r)
}
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_moveldup_ps&expand=3860)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_moveldup_ps&expand=3860)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovsldup))]
pub unsafe fn _mm512_mask_moveldup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 {
- let mov: f32x16 = simd_shuffle16!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]);
+ let mov: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]);
transmute(simd_select_bitmask(k, mov, src.as_f32x16()))
}
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_moveldup_ps&expand=3861)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_moveldup_ps&expand=3861)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovsldup))]
pub unsafe fn _mm512_maskz_moveldup_ps(k: __mmask16, a: __m512) -> __m512 {
- let mov: f32x16 = simd_shuffle16!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]);
+ let mov: f32x16 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14]);
let zero = _mm512_setzero_ps().as_f32x16();
transmute(simd_select_bitmask(k, mov, zero))
}
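
The dup families below are fixed shuffles with no immediate. A scalar model for the even-index case (moveldup); movehdup is the same with index `i | 1`, and movedup_pd applies the even-index pattern to 64-bit lanes:

fn moveldup_model(a: [f32; 16]) -> [f32; 16] {
    // Index pattern [0, 0, 2, 2, 4, 4, ...]: each even element is
    // duplicated into the following odd slot.
    std::array::from_fn(|i| a[i & !1])
}
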
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_moveldup_ps&expand=3857)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_moveldup_ps&expand=3857)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovsldup))]
@@ -22323,7 +22323,7 @@ pub unsafe fn _mm256_mask_moveldup_ps(src: __m256, k: __mmask8, a: __m256) -> __
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_moveldup_ps&expand=3858)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_moveldup_ps&expand=3858)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovsldup))]
@@ -22335,7 +22335,7 @@ pub unsafe fn _mm256_maskz_moveldup_ps(k: __mmask8, a: __m256) -> __m256 {
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_moveldup_ps&expand=3854)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_moveldup_ps&expand=3854)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovsldup))]
@@ -22346,7 +22346,7 @@ pub unsafe fn _mm_mask_moveldup_ps(src: __m128, k: __mmask8, a: __m128) -> __m12
/// Duplicate even-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_moveldup_ps&expand=3855)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_moveldup_ps&expand=3855)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovsldup))]
@@ -22358,41 +22358,41 @@ pub unsafe fn _mm_maskz_moveldup_ps(k: __mmask8, a: __m128) -> __m128 {
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_movehdup_ps&expand=3852)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movehdup_ps&expand=3852)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovshdup))]
pub unsafe fn _mm512_movehdup_ps(a: __m512) -> __m512 {
- let r: f32x16 = simd_shuffle16!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]);
+ let r: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]);
transmute(r)
}
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_movehdup&expand=3850)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_movehdup_ps&expand=3850)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovshdup))]
pub unsafe fn _mm512_mask_movehdup_ps(src: __m512, k: __mmask16, a: __m512) -> __m512 {
- let mov: f32x16 = simd_shuffle16!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]);
+ let mov: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]);
transmute(simd_select_bitmask(k, mov, src.as_f32x16()))
}
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_moveh&expand=3851)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_movehdup_ps&expand=3851)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovshdup))]
pub unsafe fn _mm512_maskz_movehdup_ps(k: __mmask16, a: __m512) -> __m512 {
- let mov: f32x16 = simd_shuffle16!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]);
+ let mov: f32x16 = simd_shuffle!(a, a, [1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15]);
let zero = _mm512_setzero_ps().as_f32x16();
transmute(simd_select_bitmask(k, mov, zero))
}
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_movehdup_ps&expand=3847)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_movehdup_ps&expand=3847)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovshdup))]
@@ -22403,7 +22403,7 @@ pub unsafe fn _mm256_mask_movehdup_ps(src: __m256, k: __mmask8, a: __m256) -> __
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_movehdup_ps&expand=3848)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_movehdup_ps&expand=3848)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovshdup))]
@@ -22415,7 +22415,7 @@ pub unsafe fn _mm256_maskz_movehdup_ps(k: __mmask8, a: __m256) -> __m256 {
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_movehdup_ps&expand=3844)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_movehdup_ps&expand=3844)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovshdup))]
@@ -22426,7 +22426,7 @@ pub unsafe fn _mm_mask_movehdup_ps(src: __m128, k: __mmask8, a: __m128) -> __m12
/// Duplicate odd-indexed single-precision (32-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_movehdup_ps&expand=3845)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_movehdup_ps&expand=3845)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovshdup))]
@@ -22438,41 +22438,41 @@ pub unsafe fn _mm_maskz_movehdup_ps(k: __mmask8, a: __m128) -> __m128 {
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_movedup_pd&expand=3843)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_movedup_pd&expand=3843)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovddup))]
pub unsafe fn _mm512_movedup_pd(a: __m512d) -> __m512d {
- let r: f64x8 = simd_shuffle8!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]);
+ let r: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]);
transmute(r)
}
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_movedup_pd&expand=3841)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_movedup_pd&expand=3841)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovddup))]
pub unsafe fn _mm512_mask_movedup_pd(src: __m512d, k: __mmask8, a: __m512d) -> __m512d {
- let mov: f64x8 = simd_shuffle8!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]);
+ let mov: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]);
transmute(simd_select_bitmask(k, mov, src.as_f64x8()))
}
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_movedup_pd&expand=3842)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_movedup_pd&expand=3842)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovddup))]
pub unsafe fn _mm512_maskz_movedup_pd(k: __mmask8, a: __m512d) -> __m512d {
- let mov: f64x8 = simd_shuffle8!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]);
+ let mov: f64x8 = simd_shuffle!(a, a, [0, 0, 2, 2, 4, 4, 6, 6]);
let zero = _mm512_setzero_pd().as_f64x8();
transmute(simd_select_bitmask(k, mov, zero))
}
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_movedup_pd&expand=3838)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_movedup_pd&expand=3838)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovddup))]
@@ -22483,7 +22483,7 @@ pub unsafe fn _mm256_mask_movedup_pd(src: __m256d, k: __mmask8, a: __m256d) -> _
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_movedup_pd&expand=3839)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_movedup_pd&expand=3839)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovddup))]
@@ -22495,7 +22495,7 @@ pub unsafe fn _mm256_maskz_movedup_pd(k: __mmask8, a: __m256d) -> __m256d {
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_movedup_pd&expand=3835)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_movedup_pd&expand=3835)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovddup))]
@@ -22506,7 +22506,7 @@ pub unsafe fn _mm_mask_movedup_pd(src: __m128d, k: __mmask8, a: __m128d) -> __m1
/// Duplicate even-indexed double-precision (64-bit) floating-point elements from a, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_movedup_pd&expand=3836)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_movedup_pd&expand=3836)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovddup))]
@@ -22518,39 +22518,39 @@ pub unsafe fn _mm_maskz_movedup_pd(k: __mmask8, a: __m128d) -> __m128d {
/// Copy a to dst, then insert 128 bits (composed of 4 packed 32-bit integers) from b into dst at the location specified by imm8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_inserti32x4&expand=3174)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_inserti32x4&expand=3174)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))] // should be vinserti32x4
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_inserti32x4<const IMM8: i32>(a: __m512i, b: __m128i) -> __m512i {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let a = a.as_i32x16();
let b = _mm512_castsi128_si512(b).as_i32x16();
let ret: i32x16 = match IMM8 & 0b11 {
- 0 => simd_shuffle16!(
+ 0 => simd_shuffle!(
a,
b,
[16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
),
- 1 => simd_shuffle16!(
+ 1 => simd_shuffle!(
a,
b,
[0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 12, 13, 14, 15],
),
- 2 => simd_shuffle16!(
+ 2 => simd_shuffle!(
a,
b,
[0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 12, 13, 14, 15],
),
- _ => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19]),
+ _ => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19]),
};
transmute(ret)
}
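
The insert direction is extract's inverse: b is widened with a cast, and a shuffle splices its low 128 bits into the lane named by IMM8. Illustrative usage:

#[target_feature(enable = "avx512f")]
unsafe fn inserti32x4_demo() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi32(0);
    let b = _mm_setr_epi32(1, 2, 3, 4);
    let r: [i32; 16] = core::mem::transmute(_mm512_inserti32x4::<3>(a, b));
    assert_eq!(r[12..], [1, 2, 3, 4]); // lane 3 replaced, lanes 0..2 untouched
}
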
/// Copy a to tmp, then insert 128 bits (composed of 4 packed 32-bit integers) from b into tmp at the location specified by imm8. Store tmp to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_inserti32x4&expand=3175)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_inserti32x4&expand=3175)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))]
@@ -22561,14 +22561,14 @@ pub unsafe fn _mm512_mask_inserti32x4<const IMM8: i32>(
a: __m512i,
b: __m128i,
) -> __m512i {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let r = _mm512_inserti32x4::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16()))
}
/// Copy a to tmp, then insert 128 bits (composed of 4 packed 32-bit integers) from b into tmp at the location specified by imm8. Store tmp to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_inserti32x4&expand=3176)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_inserti32x4&expand=3176)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinserti32x4, IMM8 = 2))]
@@ -22578,7 +22578,7 @@ pub unsafe fn _mm512_maskz_inserti32x4<const IMM8: i32>(
a: __m512i,
b: __m128i,
) -> __m512i {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let r = _mm512_inserti32x4::<IMM8>(a, b);
let zero = _mm512_setzero_si512().as_i32x16();
transmute(simd_select_bitmask(k, r.as_i32x16(), zero))
@@ -22586,7 +22586,7 @@ pub unsafe fn _mm512_maskz_inserti32x4<const IMM8: i32>(
/// Copy a to dst, then insert 128 bits (composed of 4 packed 32-bit integers) from b into dst at the location specified by imm8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_inserti32x4&expand=3171)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_inserti32x4&expand=3171)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22595,19 +22595,19 @@ pub unsafe fn _mm512_maskz_inserti32x4<const IMM8: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_inserti32x4<const IMM8: i32>(a: __m256i, b: __m128i) -> __m256i {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let a = a.as_i32x8();
let b = _mm256_castsi128_si256(b).as_i32x8();
let ret: i32x8 = match IMM8 & 0b1 {
- 0 => simd_shuffle8!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
- _ => simd_shuffle8!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
+ 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
+ _ => simd_shuffle!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
};
transmute(ret)
}
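Note that the 256-bit variants are gated on `avx512f,avx512vl` together, and the immediate shrinks to a single bit choosing the low or high 128-bit half. A sketch under the same assumptions, additionally requiring AVX512VL:
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn demo_inserti32x4_vl() {
    use std::arch::x86_64::*;
    let a = _mm256_set1_epi32(0);
    let b = _mm_set1_epi32(5);
    // IMM8 = 1 replaces the upper 128-bit half (elements 4..=7).
    let r = _mm256_inserti32x4::<1>(a, b);
    let lanes: [i32; 8] = core::mem::transmute(r);
    assert_eq!(lanes, [0, 0, 0, 0, 5, 5, 5, 5]);
}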
/// Copy a to tmp, then insert 128 bits (composed of 4 packed 32-bit integers) from b into tmp at the location specified by imm8. Store tmp to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_inserti32x4&expand=3172)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_inserti32x4&expand=3172)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22621,14 +22621,14 @@ pub unsafe fn _mm256_mask_inserti32x4<const IMM8: i32>(
a: __m256i,
b: __m128i,
) -> __m256i {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm256_inserti32x4::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8()))
}
/// Copy a to tmp, then insert 128 bits (composed of 4 packed 32-bit integers) from b into tmp at the location specified by imm8. Store tmp to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_inserti32x4&expand=3173)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_inserti32x4&expand=3173)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22641,7 +22641,7 @@ pub unsafe fn _mm256_maskz_inserti32x4<const IMM8: i32>(
a: __m256i,
b: __m128i,
) -> __m256i {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm256_inserti32x4::<IMM8>(a, b);
let zero = _mm256_setzero_si256().as_i32x8();
transmute(simd_select_bitmask(k, r.as_i32x8(), zero))
@@ -22649,23 +22649,23 @@ pub unsafe fn _mm256_maskz_inserti32x4<const IMM8: i32>(
/// Copy a to dst, then insert 256 bits (composed of 4 packed 64-bit integers) from b into dst at the location specified by imm8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_inserti64x4&expand=3186)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_inserti64x4&expand=3186)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))] //should be vinserti64x4
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_inserti64x4<const IMM8: i32>(a: __m512i, b: __m256i) -> __m512i {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let b = _mm512_castsi256_si512(b);
match IMM8 & 0b1 {
- 0 => simd_shuffle8!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
- _ => simd_shuffle8!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
+ 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
+ _ => simd_shuffle!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
}
}
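Same pattern at 64-bit granularity: the one-bit immediate picks which 256-bit half of the destination receives `b`. Illustrative sketch (hypothetical helper, same toolchain assumptions):
#[target_feature(enable = "avx512f")]
unsafe fn demo_inserti64x4() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_epi64(0);
    let b = _mm256_set1_epi64x(9);
    // IMM8 = 1 replaces the upper 256 bits (elements 4..=7).
    let r = _mm512_inserti64x4::<1>(a, b);
    let lanes: [i64; 8] = core::mem::transmute(r);
    assert_eq!(lanes, [0, 0, 0, 0, 9, 9, 9, 9]);
}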
/// Copy a to tmp, then insert 256 bits (composed of 4 packed 64-bit integers) from b into tmp at the location specified by imm8. Store tmp to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_inserti64x4&expand=3187)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_inserti64x4&expand=3187)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))]
@@ -22676,14 +22676,14 @@ pub unsafe fn _mm512_mask_inserti64x4<const IMM8: i32>(
a: __m512i,
b: __m256i,
) -> __m512i {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm512_inserti64x4::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8()))
}
/// Copy a to tmp, then insert 256 bits (composed of 4 packed 64-bit integers) from b into tmp at the location specified by imm8. Store tmp to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_inserti64x4&expand=3188)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_inserti64x4&expand=3188)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinserti64x4, IMM8 = 1))]
@@ -22693,7 +22693,7 @@ pub unsafe fn _mm512_maskz_inserti64x4<const IMM8: i32>(
a: __m512i,
b: __m256i,
) -> __m512i {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm512_inserti64x4::<IMM8>(a, b);
let zero = _mm512_setzero_si512().as_i64x8();
transmute(simd_select_bitmask(k, r.as_i64x8(), zero))
@@ -22701,37 +22701,37 @@ pub unsafe fn _mm512_maskz_inserti64x4<const IMM8: i32>(
/// Copy a to dst, then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from b into dst at the location specified by imm8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_insertf32x4&expand=3155)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_insertf32x4&expand=3155)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_insertf32x4<const IMM8: i32>(a: __m512, b: __m128) -> __m512 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let b = _mm512_castps128_ps512(b);
match IMM8 & 0b11 {
- 0 => simd_shuffle16!(
+ 0 => simd_shuffle!(
a,
b,
[16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
),
- 1 => simd_shuffle16!(
+ 1 => simd_shuffle!(
a,
b,
[0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 12, 13, 14, 15],
),
- 2 => simd_shuffle16!(
+ 2 => simd_shuffle!(
a,
b,
[0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 12, 13, 14, 15],
),
- _ => simd_shuffle16!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19]),
+ _ => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19]),
}
}
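The floating-point form mirrors the integer one; only the element type changes. A brief sketch with the same caveats:
#[target_feature(enable = "avx512f")]
unsafe fn demo_insertf32x4() {
    use std::arch::x86_64::*;
    let a = _mm512_set1_ps(1.0);
    let b = _mm_setr_ps(10.0, 20.0, 30.0, 40.0);
    // IMM8 = 3 targets the top 128-bit lane (elements 12..=15).
    let r = _mm512_insertf32x4::<3>(a, b);
    let lanes: [f32; 16] = core::mem::transmute(r);
    assert_eq!(&lanes[12..], &[10.0, 20.0, 30.0, 40.0]);
}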
/// Copy a to tmp, then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from b into tmp at the location specified by imm8. Store tmp to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_insertf32x4&expand=3156)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_insertf32x4&expand=3156)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))]
@@ -22742,14 +22742,14 @@ pub unsafe fn _mm512_mask_insertf32x4<const IMM8: i32>(
a: __m512,
b: __m128,
) -> __m512 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let r = _mm512_insertf32x4::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x16(), src.as_f32x16()))
}
/// Copy a to tmp, then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from b into tmp at the location specified by imm8. Store tmp to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_insertf32x4&expand=3157)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_insertf32x4&expand=3157)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf32x4, IMM8 = 2))]
@@ -22759,7 +22759,7 @@ pub unsafe fn _mm512_maskz_insertf32x4<const IMM8: i32>(
a: __m512,
b: __m128,
) -> __m512 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
let r = _mm512_insertf32x4::<IMM8>(a, b);
let zero = _mm512_setzero_ps().as_f32x16();
transmute(simd_select_bitmask(k, r.as_f32x16(), zero))
@@ -22767,7 +22767,7 @@ pub unsafe fn _mm512_maskz_insertf32x4<const IMM8: i32>(
/// Copy a to dst, then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from b into dst at the location specified by imm8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf32x4&expand=3152)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insertf32x4&expand=3152)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22776,17 +22776,17 @@ pub unsafe fn _mm512_maskz_insertf32x4<const IMM8: i32>(
)]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_insertf32x4<const IMM8: i32>(a: __m256, b: __m128) -> __m256 {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let b = _mm256_castps128_ps256(b);
match IMM8 & 0b1 {
- 0 => simd_shuffle8!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
- _ => simd_shuffle8!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
+ 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
+ _ => simd_shuffle!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
}
}
/// Copy a to tmp, then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from b into tmp at the location specified by imm8. Store tmp to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_insertf32x4&expand=3153)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_insertf32x4&expand=3153)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22800,14 +22800,14 @@ pub unsafe fn _mm256_mask_insertf32x4<const IMM8: i32>(
a: __m256,
b: __m128,
) -> __m256 {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm256_insertf32x4::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_f32x8(), src.as_f32x8()))
}
/// Copy a to tmp, then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from b into tmp at the location specified by imm8. Store tmp to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_insertf32x4&expand=3154)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_insertf32x4&expand=3154)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(
@@ -22820,7 +22820,7 @@ pub unsafe fn _mm256_maskz_insertf32x4<const IMM8: i32>(
a: __m256,
b: __m128,
) -> __m256 {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm256_insertf32x4::<IMM8>(a, b);
let zero = _mm256_setzero_ps().as_f32x8();
transmute(simd_select_bitmask(k, r.as_f32x8(), zero))
@@ -22828,23 +22828,23 @@ pub unsafe fn _mm256_maskz_insertf32x4<const IMM8: i32>(
/// Copy a to dst, then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from b into dst at the location specified by imm8.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_insertf64x4&expand=3167)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_insertf64x4&expand=3167)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_insertf64x4<const IMM8: i32>(a: __m512d, b: __m256d) -> __m512d {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let b = _mm512_castpd256_pd512(b);
match IMM8 & 0b1 {
- 0 => simd_shuffle8!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
- _ => simd_shuffle8!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
+ 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 4, 5, 6, 7]),
+ _ => simd_shuffle!(a, b, [0, 1, 2, 3, 8, 9, 10, 11]),
}
}
/// Copy a to tmp, then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from b into tmp at the location specified by imm8. Store tmp to dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_insertf64x4&expand=3168)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_insertf64x4&expand=3168)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))]
@@ -22855,14 +22855,14 @@ pub unsafe fn _mm512_mask_insertf64x4<const IMM8: i32>(
a: __m512d,
b: __m256d,
) -> __m512d {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm512_insertf64x4::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_f64x8(), src.as_f64x8()))
}
/// Copy a to tmp, then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from b into tmp at the location specified by imm8. Store tmp to dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_insertf64x4&expand=3169)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_insertf64x4&expand=3169)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vinsertf64x4, IMM8 = 1))]
@@ -22872,7 +22872,7 @@ pub unsafe fn _mm512_maskz_insertf64x4<const IMM8: i32>(
a: __m512d,
b: __m256d,
) -> __m512d {
- static_assert_imm1!(IMM8);
+ static_assert_uimm_bits!(IMM8, 1);
let r = _mm512_insertf64x4::<IMM8>(a, b);
let zero = _mm512_setzero_pd().as_f64x8();
transmute(simd_select_bitmask(k, r.as_f64x8(), zero))
@@ -22880,7 +22880,7 @@ pub unsafe fn _mm512_maskz_insertf64x4<const IMM8: i32>(
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpackhi_epi32&expand=6021)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi32&expand=6021)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhps))] //should be vpunpckhdq
@@ -22888,7 +22888,7 @@ pub unsafe fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i {
let a = a.as_i32x16();
let b = b.as_i32x16();
#[rustfmt::skip]
- let r: i32x16 = simd_shuffle16!(
+ let r: i32x16 = simd_shuffle!(
a, b,
[ 2, 18, 3, 19,
2 + 4, 18 + 4, 3 + 4, 19 + 4,
@@ -22900,7 +22900,7 @@ pub unsafe fn _mm512_unpackhi_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpackhi_epi32&expand=6019)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi32&expand=6019)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpckhdq))]
@@ -22916,7 +22916,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi32(
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpackhi_epi32&expand=6020)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi32&expand=6020)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpckhdq))]
@@ -22928,7 +22928,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi32(k: __mmask16, a: __m512i, b: __m512i)
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpackhi_epi32&expand=6016)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi32&expand=6016)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhdq))]
@@ -22944,7 +22944,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi32(
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpackhi_epi32&expand=6017)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi32&expand=6017)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhdq))]
@@ -22956,7 +22956,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi32(k: __mmask8, a: __m256i, b: __m256i) -
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpackhi_epi32&expand=6013)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi32&expand=6013)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhdq))]
@@ -22972,7 +22972,7 @@ pub unsafe fn _mm_mask_unpackhi_epi32(
/// Unpack and interleave 32-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpackhi_epi32&expand=6014)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi32&expand=6014)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhdq))]
@@ -22984,17 +22984,17 @@ pub unsafe fn _mm_maskz_unpackhi_epi32(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpackhi_epi64&expand=6030)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_epi64&expand=6030)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhpd))] //should be vpunpckhqdq
pub unsafe fn _mm512_unpackhi_epi64(a: __m512i, b: __m512i) -> __m512i {
- simd_shuffle8!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6])
+ simd_shuffle!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6])
}
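The index list `[1, 9, 1 + 2, 9 + 2, ...]` reads: within each 128-bit lane, take the high (odd) element from `a`, then the high element from `b`. An illustrative check (hypothetical helper, same toolchain assumptions as the sketches above):
#[target_feature(enable = "avx512f")]
unsafe fn demo_unpackhi_epi64() {
    use std::arch::x86_64::*;
    let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let b = _mm512_setr_epi64(10, 11, 12, 13, 14, 15, 16, 17);
    let r = _mm512_unpackhi_epi64(a, b);
    // High element of each 128-bit lane, alternating a then b.
    let lanes: [i64; 8] = core::mem::transmute(r);
    assert_eq!(lanes, [1, 11, 3, 13, 5, 15, 7, 17]);
}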
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpackhi_epi64&expand=6028)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_epi64&expand=6028)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpckhqdq))]
@@ -23010,7 +23010,7 @@ pub unsafe fn _mm512_mask_unpackhi_epi64(
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpackhi_epi64&expand=6029)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_epi64&expand=6029)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpckhqdq))]
@@ -23022,7 +23022,7 @@ pub unsafe fn _mm512_maskz_unpackhi_epi64(k: __mmask8, a: __m512i, b: __m512i) -
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpackhi_epi64&expand=6025)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_epi64&expand=6025)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhqdq))]
@@ -23038,7 +23038,7 @@ pub unsafe fn _mm256_mask_unpackhi_epi64(
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpackhi_epi64&expand=6026)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_epi64&expand=6026)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhqdq))]
@@ -23050,7 +23050,7 @@ pub unsafe fn _mm256_maskz_unpackhi_epi64(k: __mmask8, a: __m256i, b: __m256i) -
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpackhi_epi64&expand=6022)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_epi64&expand=6022)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhqdq))]
@@ -23066,7 +23066,7 @@ pub unsafe fn _mm_mask_unpackhi_epi64(
/// Unpack and interleave 64-bit integers from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpackhi_epi64&expand=6023)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_epi64&expand=6023)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckhqdq))]
@@ -23078,13 +23078,13 @@ pub unsafe fn _mm_maskz_unpackhi_epi64(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpackhi_ps&expand=6060)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_ps&expand=6060)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhps))]
pub unsafe fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 {
#[rustfmt::skip]
- simd_shuffle16!(
+ simd_shuffle!(
a, b,
[ 2, 18, 3, 19,
2 + 4, 18 + 4, 3 + 4, 19 + 4,
@@ -23095,7 +23095,7 @@ pub unsafe fn _mm512_unpackhi_ps(a: __m512, b: __m512) -> __m512 {
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpackhi_ps&expand=6058)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_ps&expand=6058)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhps))]
@@ -23106,7 +23106,7 @@ pub unsafe fn _mm512_mask_unpackhi_ps(src: __m512, k: __mmask16, a: __m512, b: _
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpackhi_ps&expand=6059)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_ps&expand=6059)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhps))]
@@ -23118,7 +23118,7 @@ pub unsafe fn _mm512_maskz_unpackhi_ps(k: __mmask16, a: __m512, b: __m512) -> __
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpackhi_ps&expand=6055)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_ps&expand=6055)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhps))]
@@ -23129,7 +23129,7 @@ pub unsafe fn _mm256_mask_unpackhi_ps(src: __m256, k: __mmask8, a: __m256, b: __
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpackhi_ps&expand=6056)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_ps&expand=6056)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhps))]
@@ -23141,7 +23141,7 @@ pub unsafe fn _mm256_maskz_unpackhi_ps(k: __mmask8, a: __m256, b: __m256) -> __m
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpackhi_ps&expand=6052)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_ps&expand=6052)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhps))]
@@ -23152,7 +23152,7 @@ pub unsafe fn _mm_mask_unpackhi_ps(src: __m128, k: __mmask8, a: __m128, b: __m12
/// Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpackhi_ps&expand=6053)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_ps&expand=6053)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhps))]
@@ -23164,17 +23164,17 @@ pub unsafe fn _mm_maskz_unpackhi_ps(k: __mmask8, a: __m128, b: __m128) -> __m128
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpackhi_pd&expand=6048)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpackhi_pd&expand=6048)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
pub unsafe fn _mm512_unpackhi_pd(a: __m512d, b: __m512d) -> __m512d {
- simd_shuffle8!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6])
+ simd_shuffle!(a, b, [1, 9, 1 + 2, 9 + 2, 1 + 4, 9 + 4, 1 + 6, 9 + 6])
}
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpackhi_pd&expand=6046)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpackhi_pd&expand=6046)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
@@ -23190,7 +23190,7 @@ pub unsafe fn _mm512_mask_unpackhi_pd(
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpackhi_pd&expand=6047)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpackhi_pd&expand=6047)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
@@ -23202,7 +23202,7 @@ pub unsafe fn _mm512_maskz_unpackhi_pd(k: __mmask8, a: __m512d, b: __m512d) -> _
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpackhi_pd&expand=6043)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpackhi_pd&expand=6043)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
@@ -23218,7 +23218,7 @@ pub unsafe fn _mm256_mask_unpackhi_pd(
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpackhi_pd&expand=6044)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpackhi_pd&expand=6044)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
@@ -23230,7 +23230,7 @@ pub unsafe fn _mm256_maskz_unpackhi_pd(k: __mmask8, a: __m256d, b: __m256d) -> _
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpackhi_pd&expand=6040)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpackhi_pd&expand=6040)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
@@ -23241,7 +23241,7 @@ pub unsafe fn _mm_mask_unpackhi_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m
/// Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpackhi_pd&expand=6041)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpackhi_pd&expand=6041)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpckhpd))]
@@ -23253,7 +23253,7 @@ pub unsafe fn _mm_maskz_unpackhi_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m1
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpacklo_epi32&expand=6078)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi32&expand=6078)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklps))] //should be vpunpckldq
@@ -23261,7 +23261,7 @@ pub unsafe fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i {
let a = a.as_i32x16();
let b = b.as_i32x16();
#[rustfmt::skip]
- let r: i32x16 = simd_shuffle16!(
+ let r: i32x16 = simd_shuffle!(
a, b,
[ 0, 16, 1, 17,
0 + 4, 16 + 4, 1 + 4, 17 + 4,
@@ -23273,7 +23273,7 @@ pub unsafe fn _mm512_unpacklo_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpacklo_epi32&expand=6076)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi32&expand=6076)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpckldq))]
@@ -23289,7 +23289,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi32(
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpacklo_epi32&expand=6077)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi32&expand=6077)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpckldq))]
@@ -23301,7 +23301,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi32(k: __mmask16, a: __m512i, b: __m512i)
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpacklo_epi32&expand=6073)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi32&expand=6073)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckldq))]
@@ -23317,7 +23317,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi32(
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpacklo_epi32&expand=6074)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi32&expand=6074)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckldq))]
@@ -23329,7 +23329,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi32(k: __mmask8, a: __m256i, b: __m256i) -
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpacklo_epi32&expand=6070)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi32&expand=6070)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckldq))]
@@ -23345,7 +23345,7 @@ pub unsafe fn _mm_mask_unpacklo_epi32(
/// Unpack and interleave 32-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpacklo_epi32&expand=6071)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi32&expand=6071)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpckldq))]
@@ -23357,17 +23357,17 @@ pub unsafe fn _mm_maskz_unpacklo_epi32(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpacklo_epi64&expand=6087)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_epi64&expand=6087)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklpd))] //should be vpunpcklqdq
pub unsafe fn _mm512_unpacklo_epi64(a: __m512i, b: __m512i) -> __m512i {
- simd_shuffle8!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6])
+ simd_shuffle!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6])
}
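Symmetric to unpackhi above, but drawing the low (even) element of each 128-bit lane. Sketch under the same assumptions:
#[target_feature(enable = "avx512f")]
unsafe fn demo_unpacklo_epi64() {
    use std::arch::x86_64::*;
    let a = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let b = _mm512_setr_epi64(10, 11, 12, 13, 14, 15, 16, 17);
    let r = _mm512_unpacklo_epi64(a, b);
    // Low element of each 128-bit lane, alternating a then b.
    let lanes: [i64; 8] = core::mem::transmute(r);
    assert_eq!(lanes, [0, 10, 2, 12, 4, 14, 6, 16]);
}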
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpacklo_epi64&expand=6085)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_epi64&expand=6085)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpcklqdq))]
@@ -23383,7 +23383,7 @@ pub unsafe fn _mm512_mask_unpacklo_epi64(
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpacklo_epi64&expand=6086)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_epi64&expand=6086)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpunpcklqdq))]
@@ -23395,7 +23395,7 @@ pub unsafe fn _mm512_maskz_unpacklo_epi64(k: __mmask8, a: __m512i, b: __m512i) -
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpacklo_epi64&expand=6082)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_epi64&expand=6082)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklqdq))]
@@ -23411,7 +23411,7 @@ pub unsafe fn _mm256_mask_unpacklo_epi64(
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpacklo_epi64&expand=6083)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_epi64&expand=6083)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklqdq))]
@@ -23423,7 +23423,7 @@ pub unsafe fn _mm256_maskz_unpacklo_epi64(k: __mmask8, a: __m256i, b: __m256i) -
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpacklo_epi64&expand=6079)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_epi64&expand=6079)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklqdq))]
@@ -23439,7 +23439,7 @@ pub unsafe fn _mm_mask_unpacklo_epi64(
/// Unpack and interleave 64-bit integers from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpacklo_epi64&expand=6080)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_epi64&expand=6080)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpunpcklqdq))]
@@ -23451,13 +23451,13 @@ pub unsafe fn _mm_maskz_unpacklo_epi64(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpacklo_ps&expand=6117)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_ps&expand=6117)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklps))]
pub unsafe fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 {
#[rustfmt::skip]
- simd_shuffle16!(a, b,
+ simd_shuffle!(a, b,
[ 0, 16, 1, 17,
0 + 4, 16 + 4, 1 + 4, 17 + 4,
0 + 8, 16 + 8, 1 + 8, 17 + 8,
@@ -23467,7 +23467,7 @@ pub unsafe fn _mm512_unpacklo_ps(a: __m512, b: __m512) -> __m512 {
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpacklo_ps&expand=6115)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_ps&expand=6115)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklps))]
@@ -23478,7 +23478,7 @@ pub unsafe fn _mm512_mask_unpacklo_ps(src: __m512, k: __mmask16, a: __m512, b: _
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpacklo_ps&expand=6116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_ps&expand=6116)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklps))]
@@ -23490,7 +23490,7 @@ pub unsafe fn _mm512_maskz_unpacklo_ps(k: __mmask16, a: __m512, b: __m512) -> __
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpacklo_ps&expand=6112)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_ps&expand=6112)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklps))]
@@ -23501,7 +23501,7 @@ pub unsafe fn _mm256_mask_unpacklo_ps(src: __m256, k: __mmask8, a: __m256, b: __
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpacklo_ps&expand=6113)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_ps&expand=6113)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklps))]
@@ -23513,7 +23513,7 @@ pub unsafe fn _mm256_maskz_unpacklo_ps(k: __mmask8, a: __m256, b: __m256) -> __m
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpacklo_ps&expand=6109)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_ps&expand=6109)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklps))]
@@ -23524,7 +23524,7 @@ pub unsafe fn _mm_mask_unpacklo_ps(src: __m128, k: __mmask8, a: __m128, b: __m12
/// Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpacklo_ps&expand=6110)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_ps&expand=6110)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklps))]
@@ -23536,17 +23536,17 @@ pub unsafe fn _mm_maskz_unpacklo_ps(k: __mmask8, a: __m128, b: __m128) -> __m128
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_unpacklo_pd&expand=6105)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_unpacklo_pd&expand=6105)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
pub unsafe fn _mm512_unpacklo_pd(a: __m512d, b: __m512d) -> __m512d {
- simd_shuffle8!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6])
+ simd_shuffle!(a, b, [0, 8, 0 + 2, 8 + 2, 0 + 4, 8 + 4, 0 + 6, 8 + 6])
}
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_unpacklo_pd&expand=6103)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_unpacklo_pd&expand=6103)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
@@ -23562,7 +23562,7 @@ pub unsafe fn _mm512_mask_unpacklo_pd(
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_unpacklo_pd&expand=6104)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_unpacklo_pd&expand=6104)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
@@ -23574,7 +23574,7 @@ pub unsafe fn _mm512_maskz_unpacklo_pd(k: __mmask8, a: __m512d, b: __m512d) -> _
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_unpacklo_pd&expand=6100)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_unpacklo_pd&expand=6100)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
@@ -23590,7 +23590,7 @@ pub unsafe fn _mm256_mask_unpacklo_pd(
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_unpacklo_pd&expand=6101)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_unpacklo_pd&expand=6101)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
@@ -23602,7 +23602,7 @@ pub unsafe fn _mm256_maskz_unpacklo_pd(k: __mmask8, a: __m256d, b: __m256d) -> _
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_unpacklo_pd&expand=6097)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_unpacklo_pd&expand=6097)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
@@ -23613,7 +23613,7 @@ pub unsafe fn _mm_mask_unpacklo_pd(src: __m128d, k: __mmask8, a: __m128d, b: __m
/// Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_unpacklo_pd&expand=6098)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_unpacklo_pd&expand=6098)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vunpcklpd))]
@@ -23625,11 +23625,11 @@ pub unsafe fn _mm_maskz_unpacklo_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m1
/// Cast vector of type __m128 to type __m512; the upper 384 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castps128_ps512&expand=621)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castps128_ps512&expand=621)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castps128_ps512(a: __m128) -> __m512 {
- simd_shuffle16!(
+ simd_shuffle!(
a,
_mm_set1_ps(-1.),
[0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
@@ -23638,11 +23638,11 @@ pub unsafe fn _mm512_castps128_ps512(a: __m128) -> __m512 {
/// Cast vector of type __m256 to type __m512; the upper 256 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castps256_ps512&expand=623)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castps256_ps512&expand=623)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castps256_ps512(a: __m256) -> __m512 {
- simd_shuffle16!(
+ simd_shuffle!(
a,
_mm256_set1_ps(-1.),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8],
@@ -23651,11 +23651,11 @@ pub unsafe fn _mm512_castps256_ps512(a: __m256) -> __m512 {
/// Cast vector of type __m128 to type __m512; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_zextps128_ps512&expand=6196)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_zextps128_ps512&expand=6196)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_zextps128_ps512(a: __m128) -> __m512 {
- simd_shuffle16!(
+ simd_shuffle!(
a,
_mm_set1_ps(0.),
[0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
@@ -23664,11 +23664,11 @@ pub unsafe fn _mm512_zextps128_ps512(a: __m128) -> __m512 {
/// Cast vector of type __m256 to type __m512; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_zextps256_ps512&expand=6197)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_zextps256_ps512&expand=6197)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_zextps256_ps512(a: __m256) -> __m512 {
- simd_shuffle16!(
+ simd_shuffle!(
a,
_mm256_set1_ps(0.),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8],
@@ -23677,25 +23677,25 @@ pub unsafe fn _mm512_zextps256_ps512(a: __m256) -> __m512 {
/// Cast vector of type __m512 to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castps512_ps128&expand=624)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castps512_ps128&expand=624)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castps512_ps128(a: __m512) -> __m128 {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
/// Cast vector of type __m512 to type __m256. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castps512_ps256&expand=625)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castps512_ps256&expand=625)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castps512_ps256(a: __m512) -> __m256 {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7])
}
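// Note on the mechanical change in these hunks: the width-suffixed macros
// simd_shuffle2!/simd_shuffle4!/simd_shuffle8!/simd_shuffle16! all expanded to
// the same compiler shuffle intrinsic and differed only in the index-array
// length they accepted, so they were merged into a single simd_shuffle! that
// infers the output lane count from the index array itself, e.g.
//     simd_shuffle!(a, a, [0, 1, 2, 3])  // 4 indices -> 4-lane result
// The static_assert_imm8!(IMM8) -> static_assert_uimm_bits!(IMM8, 8) hunks
// further below are the same kind of consolidation for the assertion macros.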
/// Cast vector of type __m512 to type __m512d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castps_pd&expand=616)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castps_pd&expand=616)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castps_pd(a: __m512) -> __m512d {
@@ -23704,7 +23704,7 @@ pub unsafe fn _mm512_castps_pd(a: __m512) -> __m512d {
/// Cast vector of type __m512 to type __m512i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castps_si512&expand=619)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castps_si512&expand=619)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castps_si512(a: __m512) -> __m512i {
@@ -23713,61 +23713,61 @@ pub unsafe fn _mm512_castps_si512(a: __m512) -> __m512i {
/// Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castpd128_pd512&expand=609)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castpd128_pd512&expand=609)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castpd128_pd512(a: __m128d) -> __m512d {
- simd_shuffle8!(a, _mm_set1_pd(-1.), [0, 1, 2, 2, 2, 2, 2, 2])
+ simd_shuffle!(a, _mm_set1_pd(-1.), [0, 1, 2, 2, 2, 2, 2, 2])
}
/// Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castpd256_pd512&expand=611)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castpd256_pd512&expand=611)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castpd256_pd512(a: __m256d) -> __m512d {
- simd_shuffle8!(a, _mm256_set1_pd(-1.), [0, 1, 2, 3, 4, 4, 4, 4])
+ simd_shuffle!(a, _mm256_set1_pd(-1.), [0, 1, 2, 3, 4, 4, 4, 4])
}
/// Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_zextpd128_pd512&expand=6193)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_zextpd128_pd512&expand=6193)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_zextpd128_pd512(a: __m128d) -> __m512d {
- simd_shuffle8!(a, _mm_set1_pd(0.), [0, 1, 2, 2, 2, 2, 2, 2])
+ simd_shuffle!(a, _mm_set1_pd(0.), [0, 1, 2, 2, 2, 2, 2, 2])
}
/// Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_zextpd256_pd512&expand=6194)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_zextpd256_pd512&expand=6194)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_zextpd256_pd512(a: __m256d) -> __m512d {
- simd_shuffle8!(a, _mm256_set1_pd(0.), [0, 1, 2, 3, 4, 4, 4, 4])
+ simd_shuffle!(a, _mm256_set1_pd(0.), [0, 1, 2, 3, 4, 4, 4, 4])
}
/// Cast vector of type __m512d to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castpd512_pd128&expand=612)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castpd512_pd128&expand=612)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castpd512_pd128(a: __m512d) -> __m128d {
- simd_shuffle2!(a, a, [0, 1])
+ simd_shuffle!(a, a, [0, 1])
}
/// Cast vector of type __m512d to type __m256d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castpd512_pd256&expand=613)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castpd512_pd256&expand=613)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castpd512_pd256(a: __m512d) -> __m256d {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
/// Cast vector of type __m512d to type __m512. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castpd_ps&expand=604)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castpd_ps&expand=604)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castpd_ps(a: __m512d) -> __m512 {
@@ -23776,7 +23776,7 @@ pub unsafe fn _mm512_castpd_ps(a: __m512d) -> __m512 {
/// Cast vector of type __m512d to type __m512i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castpd_si512&expand=607)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castpd_si512&expand=607)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castpd_si512(a: __m512d) -> __m512i {
@@ -23785,61 +23785,61 @@ pub unsafe fn _mm512_castpd_si512(a: __m512d) -> __m512i {
/// Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castsi128_si512&expand=629)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castsi128_si512&expand=629)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castsi128_si512(a: __m128i) -> __m512i {
- simd_shuffle8!(a, _mm_set1_epi64x(-1), [0, 1, 2, 2, 2, 2, 2, 2])
+ simd_shuffle!(a, _mm_set1_epi64x(-1), [0, 1, 2, 2, 2, 2, 2, 2])
}
/// Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castsi256_si512&expand=633)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castsi256_si512&expand=633)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castsi256_si512(a: __m256i) -> __m512i {
- simd_shuffle8!(a, _mm256_set1_epi64x(-1), [0, 1, 2, 3, 4, 4, 4, 4])
+ simd_shuffle!(a, _mm256_set1_epi64x(-1), [0, 1, 2, 3, 4, 4, 4, 4])
}
/// Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_zextsi128_si512&expand=6199)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_zextsi128_si512&expand=6199)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_zextsi128_si512(a: __m128i) -> __m512i {
- simd_shuffle8!(a, _mm_set1_epi64x(0), [0, 1, 2, 2, 2, 2, 2, 2])
+ simd_shuffle!(a, _mm_set1_epi64x(0), [0, 1, 2, 2, 2, 2, 2, 2])
}
/// Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_zextsi256_si512&expand=6200)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_zextsi256_si512&expand=6200)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_zextsi256_si512(a: __m256i) -> __m512i {
- simd_shuffle8!(a, _mm256_set1_epi64x(0), [0, 1, 2, 3, 4, 4, 4, 4])
+ simd_shuffle!(a, _mm256_set1_epi64x(0), [0, 1, 2, 3, 4, 4, 4, 4])
}
/// Cast vector of type __m512i to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castsi512_si128&expand=636)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castsi512_si128&expand=636)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castsi512_si128(a: __m512i) -> __m128i {
- simd_shuffle2!(a, a, [0, 1])
+ simd_shuffle!(a, a, [0, 1])
}
/// Cast vector of type __m512i to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castsi512_si256&expand=637)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castsi512_si256&expand=637)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castsi512_si256(a: __m512i) -> __m256i {
- simd_shuffle4!(a, a, [0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3])
}
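// Illustrative sketch (hypothetical test code, not from the patched source) of
// why the cast/zext pairs above both exist: both are free at runtime, but only
// the zext* variants give the upper lanes a defined value. Assumes x86_64 with
// avx512f available.
#[target_feature(enable = "avx512f")]
unsafe fn cast_vs_zext_sketch() {
    use core::arch::x86_64::*;
    let lo = _mm_set_ps(4.0, 3.0, 2.0, 1.0); // lanes [1.0, 2.0, 3.0, 4.0]
    // zext: the upper 384 bits are zeroed, so every lane may be inspected.
    let z = _mm512_zextps128_ps512(lo);
    let mut out = [0.0f32; 16];
    _mm512_storeu_ps(out.as_mut_ptr(), z);
    assert!(out[4..].iter().all(|&x| x == 0.0));
    // cast: the upper 384 bits are undefined; use it only when they are
    // ignored, e.g. when immediately narrowing back down.
    let lo2 = _mm512_castps512_ps128(_mm512_castps128_ps512(lo));
    let mut four = [0.0f32; 4];
    _mm_storeu_ps(four.as_mut_ptr(), lo2);
    assert_eq!(four, [1.0, 2.0, 3.0, 4.0]);
}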
/// Cast vector of type __m512i to type __m512. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castsi512_ps&expand=635)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castsi512_ps&expand=635)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castsi512_ps(a: __m512i) -> __m512 {
@@ -23848,7 +23848,7 @@ pub unsafe fn _mm512_castsi512_ps(a: __m512i) -> __m512 {
/// Cast vector of type __m512i to type __m512d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_castsi512_pd&expand=634)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_castsi512_pd&expand=634)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_castsi512_pd(a: __m512i) -> __m512d {
@@ -23857,7 +23857,7 @@ pub unsafe fn _mm512_castsi512_pd(a: __m512i) -> __m512d {
/// Copy the lower 32-bit integer in a to dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsi512_si32&expand=1882)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cvtsi512_si32&expand=1882)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(vmovd))]
@@ -23868,19 +23868,19 @@ pub unsafe fn _mm512_cvtsi512_si32(a: __m512i) -> i32 {
/// Broadcast the low packed 32-bit integer from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastd_epi32&expand=545)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastd_epi32&expand=545)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcast))] //should be vpbroadcastd
pub unsafe fn _mm512_broadcastd_epi32(a: __m128i) -> __m512i {
let a = _mm512_castsi128_si512(a).as_i32x16();
- let ret: i32x16 = simd_shuffle16!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+ let ret: i32x16 = simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
transmute(ret)
}
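// Minimal usage sketch for the element broadcast (hypothetical test code,
// assumes avx512f): every one of the 16 i32 lanes becomes a copy of the
// lowest lane of `a`.
#[target_feature(enable = "avx512f")]
unsafe fn broadcastd_sketch() {
    use core::arch::x86_64::*;
    let a = _mm_set_epi32(0, 0, 0, 7); // low lane = 7
    let v = _mm512_broadcastd_epi32(a);
    assert_eq!(_mm512_cvtsi512_si32(v), 7); // low lane
    assert_eq!(_mm512_reduce_add_epi32(v), 7 * 16); // all 16 lanes
}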
/// Broadcast the low packed 32-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcastd_epi32&expand=546)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastd_epi32&expand=546)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd
@@ -23891,7 +23891,7 @@ pub unsafe fn _mm512_mask_broadcastd_epi32(src: __m512i, k: __mmask16, a: __m128
/// Broadcast the low packed 32-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcastd_epi32&expand=547)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastd_epi32&expand=547)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd
@@ -23903,7 +23903,7 @@ pub unsafe fn _mm512_maskz_broadcastd_epi32(k: __mmask16, a: __m128i) -> __m512i
/// Broadcast the low packed 32-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcastd_epi32&expand=543)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastd_epi32&expand=543)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd
@@ -23914,7 +23914,7 @@ pub unsafe fn _mm256_mask_broadcastd_epi32(src: __m256i, k: __mmask8, a: __m128i
/// Broadcast the low packed 32-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcastd_epi32&expand=544)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastd_epi32&expand=544)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd
@@ -23926,7 +23926,7 @@ pub unsafe fn _mm256_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m256i
/// Broadcast the low packed 32-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_broadcastd_epi32&expand=540)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastd_epi32&expand=540)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd
@@ -23937,7 +23937,7 @@ pub unsafe fn _mm_mask_broadcastd_epi32(src: __m128i, k: __mmask8, a: __m128i) -
/// Broadcast the low packed 32-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_broadcastd_epi32&expand=541)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastd_epi32&expand=541)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastd
@@ -23949,17 +23949,17 @@ pub unsafe fn _mm_maskz_broadcastd_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Broadcast the low packed 64-bit integer from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastq_epi64&expand=560)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastq_epi64&expand=560)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcas))] //should be vpbroadcastq
pub unsafe fn _mm512_broadcastq_epi64(a: __m128i) -> __m512i {
- simd_shuffle8!(a, a, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0])
}
/// Broadcast the low packed 64-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcastq_epi64&expand=561)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastq_epi64&expand=561)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq
@@ -23970,7 +23970,7 @@ pub unsafe fn _mm512_mask_broadcastq_epi64(src: __m512i, k: __mmask8, a: __m128i
/// Broadcast the low packed 64-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcastq_epi64&expand=562)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastq_epi64&expand=562)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq
@@ -23982,7 +23982,7 @@ pub unsafe fn _mm512_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m512i
/// Broadcast the low packed 64-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcastq_epi64&expand=558)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastq_epi64&expand=558)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq
@@ -23993,7 +23993,7 @@ pub unsafe fn _mm256_mask_broadcastq_epi64(src: __m256i, k: __mmask8, a: __m128i
/// Broadcast the low packed 64-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcastq_epi64&expand=559)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastq_epi64&expand=559)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq
@@ -24005,7 +24005,7 @@ pub unsafe fn _mm256_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m256i
/// Broadcast the low packed 64-bit integer from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_broadcastq_epi64&expand=555)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastq_epi64&expand=555)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq
@@ -24016,7 +24016,7 @@ pub unsafe fn _mm_mask_broadcastq_epi64(src: __m128i, k: __mmask8, a: __m128i) -
/// Broadcast the low packed 64-bit integer from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_broadcastq_epi64&expand=556)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastq_epi64&expand=556)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcast))] //should be vpbroadcastq
@@ -24028,17 +24028,17 @@ pub unsafe fn _mm_maskz_broadcastq_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastss_ps&expand=578)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastss_ps&expand=578)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
pub unsafe fn _mm512_broadcastss_ps(a: __m128) -> __m512 {
- simd_shuffle16!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
}
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcastss_ps&expand=579)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastss_ps&expand=579)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -24049,7 +24049,7 @@ pub unsafe fn _mm512_mask_broadcastss_ps(src: __m512, k: __mmask16, a: __m128) -
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcastss_ps&expand=580)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastss_ps&expand=580)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -24061,7 +24061,7 @@ pub unsafe fn _mm512_maskz_broadcastss_ps(k: __mmask16, a: __m128) -> __m512 {
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcastss_ps&expand=576)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastss_ps&expand=576)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -24072,7 +24072,7 @@ pub unsafe fn _mm256_mask_broadcastss_ps(src: __m256, k: __mmask8, a: __m128) ->
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcastss_ps&expand=577)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastss_ps&expand=577)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -24084,7 +24084,7 @@ pub unsafe fn _mm256_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m256 {
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_broadcastss_ps&expand=573)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_broadcastss_ps&expand=573)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -24095,7 +24095,7 @@ pub unsafe fn _mm_mask_broadcastss_ps(src: __m128, k: __mmask8, a: __m128) -> __
/// Broadcast the low single-precision (32-bit) floating-point element from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_broadcastss_ps&expand=574)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_broadcastss_ps&expand=574)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vbroadcastss))]
@@ -24107,17 +24107,17 @@ pub unsafe fn _mm_maskz_broadcastss_ps(k: __mmask8, a: __m128) -> __m128 {
/// Broadcast the low double-precision (64-bit) floating-point element from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcastsd_pd&expand=567)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastsd_pd&expand=567)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
pub unsafe fn _mm512_broadcastsd_pd(a: __m128d) -> __m512d {
- simd_shuffle8!(a, a, [0, 0, 0, 0, 0, 0, 0, 0])
+ simd_shuffle!(a, a, [0, 0, 0, 0, 0, 0, 0, 0])
}
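// Sketch of the writemask/zeromask convention shared by all the _mask_/_maskz_
// variants in this file (hypothetical test code, assumes avx512f): bit i of k
// selects whether lane i receives the operation's result; unselected lanes
// keep src (writemask) or become zero (zeromask).
#[target_feature(enable = "avx512f")]
unsafe fn mask_convention_sketch() {
    use core::arch::x86_64::*;
    let a = _mm_set1_epi32(7);
    let src = _mm512_set1_epi32(-1);
    let k: __mmask16 = 0b0000_0000_0000_0101; // select lanes 0 and 2
    let merged = _mm512_mask_broadcastd_epi32(src, k, a);
    let zeroed = _mm512_maskz_broadcastd_epi32(k, a);
    assert_eq!(_mm512_cvtsi512_si32(merged), 7); // lane 0 selected in both
    assert_eq!(_mm512_cvtsi512_si32(zeroed), 7);
    assert_eq!(_mm512_reduce_add_epi32(merged), 2 * 7 - 14); // 14 lanes keep -1
    assert_eq!(_mm512_reduce_add_epi32(zeroed), 2 * 7); // 14 lanes become 0
}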
/// Broadcast the low double-precision (64-bit) floating-point element from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcastsd_pd&expand=568)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcastsd_pd&expand=568)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
@@ -24128,7 +24128,7 @@ pub unsafe fn _mm512_mask_broadcastsd_pd(src: __m512d, k: __mmask8, a: __m128d)
/// Broadcast the low double-precision (64-bit) floating-point element from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcastsd_pd&expand=569)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcastsd_pd&expand=569)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
@@ -24140,7 +24140,7 @@ pub unsafe fn _mm512_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m512d {
/// Broadcast the low double-precision (64-bit) floating-point element from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcastsd_pd&expand=565)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcastsd_pd&expand=565)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
@@ -24151,7 +24151,7 @@ pub unsafe fn _mm256_mask_broadcastsd_pd(src: __m256d, k: __mmask8, a: __m128d)
/// Broadcast the low double-precision (64-bit) floating-point element from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcastsd_pd&expand=566)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcastsd_pd&expand=566)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vbroadcastsd))]
@@ -24163,18 +24163,18 @@ pub unsafe fn _mm256_maskz_broadcastsd_pd(k: __mmask8, a: __m128d) -> __m256d {
/// Broadcast the 4 packed 32-bit integers from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcast_i32x4&expand=510)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcast_i32x4&expand=510)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf
pub unsafe fn _mm512_broadcast_i32x4(a: __m128i) -> __m512i {
let a = a.as_i32x4();
- let ret: i32x16 = simd_shuffle16!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]);
+ let ret: i32x16 = simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]);
transmute(ret)
}
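// Sketch contrasting the block broadcast with broadcastd above (hypothetical
// test code, assumes avx512f): broadcast_i32x4 repeats the whole 128-bit
// block [e0, e1, e2, e3] four times rather than repeating a single lane.
#[target_feature(enable = "avx512f")]
unsafe fn broadcast_i32x4_sketch() {
    use core::arch::x86_64::*;
    let a = _mm_set_epi32(3, 2, 1, 0); // lanes [0, 1, 2, 3]
    let v = _mm512_broadcast_i32x4(a); // lanes [0, 1, 2, 3, 0, 1, 2, 3, ...]
    assert_eq!(_mm512_cvtsi512_si32(v), 0);
    assert_eq!(_mm512_reduce_add_epi32(v), 4 * (0 + 1 + 2 + 3));
}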
/// Broadcast the 4 packed 32-bit integers from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcast_i32x4&expand=511)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcast_i32x4&expand=511)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf
pub unsafe fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i) -> __m512i {
@@ -24184,7 +24184,7 @@ pub unsafe fn _mm512_mask_broadcast_i32x4(src: __m512i, k: __mmask16, a: __m128i
/// Broadcast the 4 packed 32-bit integers from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcast_i32x4&expand=512)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcast_i32x4&expand=512)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcasti32x4, linux: vshuf
pub unsafe fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i {
@@ -24195,18 +24195,18 @@ pub unsafe fn _mm512_maskz_broadcast_i32x4(k: __mmask16, a: __m128i) -> __m512i
/// Broadcast the 4 packed 32-bit integers from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_i32x4&expand=507)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_i32x4&expand=507)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf
pub unsafe fn _mm256_broadcast_i32x4(a: __m128i) -> __m256i {
let a = a.as_i32x4();
- let ret: i32x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]);
+ let ret: i32x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3]);
transmute(ret)
}
/// Broadcast the 4 packed 32-bit integers from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcast_i32x4&expand=508)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcast_i32x4&expand=508)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf
pub unsafe fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i) -> __m256i {
@@ -24216,7 +24216,7 @@ pub unsafe fn _mm256_mask_broadcast_i32x4(src: __m256i, k: __mmask8, a: __m128i)
/// Broadcast the 4 packed 32-bit integers from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcast_i32x4&expand=509)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcast_i32x4&expand=509)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcasti32x4, linux: vshuf
pub unsafe fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i {
@@ -24227,16 +24227,16 @@ pub unsafe fn _mm256_maskz_broadcast_i32x4(k: __mmask8, a: __m128i) -> __m256i {
/// Broadcast the 4 packed 64-bit integers from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_broadcast_i64x4&expand=522)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcast_i64x4&expand=522)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm
pub unsafe fn _mm512_broadcast_i64x4(a: __m256i) -> __m512i {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3])
}
/// Broadcast the 4 packed 64-bit integers from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_broadcast_i64x4&expand=523)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcast_i64x4&expand=523)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm
pub unsafe fn _mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i) -> __m512i {
@@ -24246,7 +24246,7 @@ pub unsafe fn _mm512_mask_broadcast_i64x4(src: __m512i, k: __mmask8, a: __m256i)
/// Broadcast the 4 packed 64-bit integers from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_broadcast_i64x4&expand=524)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcast_i64x4&expand=524)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcasti64x4, linux: vperm
pub unsafe fn _mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i {
@@ -24257,16 +24257,16 @@ pub unsafe fn _mm512_maskz_broadcast_i64x4(k: __mmask8, a: __m256i) -> __m512i {
/// Broadcast the 4 packed single-precision (32-bit) floating-point elements from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_broadcast_f32x4&expand=483)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcast_f32x4&expand=483)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshuf
pub unsafe fn _mm512_broadcast_f32x4(a: __m128) -> __m512 {
- simd_shuffle16!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])
}
/// Broadcast the 4 packed single-precision (32-bit) floating-point elements from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_broadcast_f32x4&expand=484)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcast_f32x4&expand=484)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshu
pub unsafe fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128) -> __m512 {
@@ -24276,7 +24276,7 @@ pub unsafe fn _mm512_mask_broadcast_f32x4(src: __m512, k: __mmask16, a: __m128)
/// Broadcast the 4 packed single-precision (32-bit) floating-point elements from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_broadcast_f32x4&expand=485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcast_f32x4&expand=485)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcastf32x4, linux: vshu
pub unsafe fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 {
@@ -24287,16 +24287,16 @@ pub unsafe fn _mm512_maskz_broadcast_f32x4(k: __mmask16, a: __m128) -> __m512 {
/// Broadcast the 4 packed single-precision (32-bit) floating-point elements from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_f32x4&expand=480)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcast_f32x4&expand=480)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshuf
pub unsafe fn _mm256_broadcast_f32x4(a: __m128) -> __m256 {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3])
}
/// Broadcast the 4 packed single-precision (32-bit) floating-point elements from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_broadcast_f32x4&expand=481)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_broadcast_f32x4&expand=481)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshu
pub unsafe fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) -> __m256 {
@@ -24306,7 +24306,7 @@ pub unsafe fn _mm256_mask_broadcast_f32x4(src: __m256, k: __mmask8, a: __m128) -
/// Broadcast the 4 packed single-precision (32-bit) floating-point elements from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_broadcast_f32x4&expand=482)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_broadcast_f32x4&expand=482)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")] //msvc: vbroadcastf32x4, linux: vshu
pub unsafe fn _mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 {
@@ -24317,16 +24317,16 @@ pub unsafe fn _mm256_maskz_broadcast_f32x4(k: __mmask8, a: __m128) -> __m256 {
/// Broadcast the 4 packed double-precision (64-bit) floating-point elements from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_broadcast_f64x4&expand=495)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcast_f64x4&expand=495)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vperm
pub unsafe fn _mm512_broadcast_f64x4(a: __m256d) -> __m512d {
- simd_shuffle8!(a, a, [0, 1, 2, 3, 0, 1, 2, 3])
+ simd_shuffle!(a, a, [0, 1, 2, 3, 0, 1, 2, 3])
}
/// Broadcast the 4 packed double-precision (64-bit) floating-point elements from a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_broadcast_f64x4&expand=496)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_broadcast_f64x4&expand=496)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vper
pub unsafe fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d) -> __m512d {
@@ -24336,7 +24336,7 @@ pub unsafe fn _mm512_mask_broadcast_f64x4(src: __m512d, k: __mmask8, a: __m256d)
/// Broadcast the 4 packed double-precision (64-bit) floating-point elements from a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_broadcast_f64x4&expand=497)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_broadcast_f64x4&expand=497)
#[inline]
#[target_feature(enable = "avx512f")] //msvc: vbroadcastf64x4, linux: vper
pub unsafe fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d {
@@ -24347,7 +24347,7 @@ pub unsafe fn _mm512_maskz_broadcast_f64x4(k: __mmask8, a: __m256d) -> __m512d {
/// Blend packed 32-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_epi32&expand=435)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi32&expand=435)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd
@@ -24357,7 +24357,7 @@ pub unsafe fn _mm512_mask_blend_epi32(k: __mmask16, a: __m512i, b: __m512i) -> _
/// Blend packed 32-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_epi32&expand=434)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi32&expand=434)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd
@@ -24367,7 +24367,7 @@ pub unsafe fn _mm256_mask_blend_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Blend packed 32-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_epi32&expand=432)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi32&expand=432)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa32))] //should be vpblendmd
@@ -24377,7 +24377,7 @@ pub unsafe fn _mm_mask_blend_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Blend packed 64-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_epi64&expand=438)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_epi64&expand=438)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq
@@ -24387,7 +24387,7 @@ pub unsafe fn _mm512_mask_blend_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __
/// Blend packed 64-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_epi64&expand=437)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_epi64&expand=437)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq
@@ -24397,7 +24397,7 @@ pub unsafe fn _mm256_mask_blend_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __
/// Blend packed 64-bit integers from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_epi64&expand=436)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_epi64&expand=436)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovdqa64))] //should be vpblendmq
@@ -24407,7 +24407,7 @@ pub unsafe fn _mm_mask_blend_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m12
/// Blend packed single-precision (32-bit) floating-point elements from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ps&expand=451)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_ps&expand=451)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps
@@ -24417,7 +24417,7 @@ pub unsafe fn _mm512_mask_blend_ps(k: __mmask16, a: __m512, b: __m512) -> __m512
/// Blend packed single-precision (32-bit) floating-point elements from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ps&expand=450)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_ps&expand=450)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps
@@ -24427,7 +24427,7 @@ pub unsafe fn _mm256_mask_blend_ps(k: __mmask8, a: __m256, b: __m256) -> __m256
/// Blend packed single-precision (32-bit) floating-point elements from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ps&expand=448)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_ps&expand=448)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vpblendmps
@@ -24437,7 +24437,7 @@ pub unsafe fn _mm_mask_blend_ps(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Blend packed double-precision (64-bit) floating-point elements from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_pd&expand=446)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_blend_pd&expand=446)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd
@@ -24447,7 +24447,7 @@ pub unsafe fn _mm512_mask_blend_pd(k: __mmask8, a: __m512d, b: __m512d) -> __m51
/// Blend packed double-precision (64-bit) floating-point elements from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_pd&expand=445)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_blend_pd&expand=445)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd
@@ -24457,7 +24457,7 @@ pub unsafe fn _mm256_mask_blend_pd(k: __mmask8, a: __m256d, b: __m256d) -> __m25
/// Blend packed double-precision (64-bit) floating-point elements from a and b using control mask k, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_pd&expand=443)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_blend_pd&expand=443)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovapd))] //should be vpblendmpd
@@ -24467,80 +24467,80 @@ pub unsafe fn _mm_mask_blend_pd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d
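// Sketch of the blend family above (hypothetical test code, assumes avx512f):
// lane i of dst is taken from b where bit i of k is set and from a otherwise;
// k is a plain integer bitmask, not a vector.
#[target_feature(enable = "avx512f")]
unsafe fn blend_sketch() {
    use core::arch::x86_64::*;
    let a = _mm512_set1_epi32(1);
    let b = _mm512_set1_epi32(100);
    let k: __mmask16 = 0b0000_0000_0000_1111; // take b in the low 4 lanes
    let r = _mm512_mask_blend_epi32(k, a, b);
    assert_eq!(_mm512_cvtsi512_si32(r), 100);
    assert_eq!(_mm512_reduce_add_epi32(r), 4 * 100 + 12 * 1);
}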
/// Concatenate a and b into a 128-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 64 bytes (16 elements) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_alignr_epi32&expand=245)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_alignr_epi32&expand=245)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_alignr_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x16();
let b = b.as_i32x16();
let imm8: i32 = IMM8 % 16;
let r: i32x16 = match imm8 {
- 0 => simd_shuffle16!(
+ 0 => simd_shuffle!(
a,
b,
[16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,],
),
- 1 => simd_shuffle16!(
+ 1 => simd_shuffle!(
a,
b,
[17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0,],
),
- 2 => simd_shuffle16!(
+ 2 => simd_shuffle!(
a,
b,
[18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1],
),
- 3 => simd_shuffle16!(
+ 3 => simd_shuffle!(
a,
b,
[19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2],
),
- 4 => simd_shuffle16!(
+ 4 => simd_shuffle!(
a,
b,
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3],
),
- 5 => simd_shuffle16!(
+ 5 => simd_shuffle!(
a,
b,
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4],
),
- 6 => simd_shuffle16!(
+ 6 => simd_shuffle!(
a,
b,
[22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5],
),
- 7 => simd_shuffle16!(
+ 7 => simd_shuffle!(
a,
b,
[23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6],
),
- 8 => simd_shuffle16!(
+ 8 => simd_shuffle!(
a,
b,
[24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7],
),
- 9 => simd_shuffle16!(
+ 9 => simd_shuffle!(
a,
b,
[25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8],
),
- 10 => simd_shuffle16!(a, b, [26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
- 11 => simd_shuffle16!(a, b, [27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
- 12 => simd_shuffle16!(a, b, [28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
- 13 => simd_shuffle16!(a, b, [29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
- 14 => simd_shuffle16!(a, b, [30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
- _ => simd_shuffle16!(a, b, [31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
+ 10 => simd_shuffle!(a, b, [26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ 11 => simd_shuffle!(a, b, [27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
+ 12 => simd_shuffle!(a, b, [28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
+ 13 => simd_shuffle!(a, b, [29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
+ 14 => simd_shuffle!(a, b, [30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
+ _ => simd_shuffle!(a, b, [31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
};
transmute(r)
}
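// Scalar model of the alignr semantics implemented by the match above
// (hypothetical sketch, not from the patched source): concatenate b (low half)
// and a (high half) into a 32-element buffer, shift right by IMM8 % 16
// elements, and keep the low 16. The shuffle-index tables are exactly the
// window positions this model computes.
fn alignr_epi32_model(a: [i32; 16], b: [i32; 16], imm8: u32) -> [i32; 16] {
    let shift = (imm8 % 16) as usize;
    let mut concat = [0i32; 32];
    concat[..16].copy_from_slice(&b); // b supplies the low elements
    concat[16..].copy_from_slice(&a); // a supplies the high elements
    let mut dst = [0i32; 16];
    dst.copy_from_slice(&concat[shift..shift + 16]);
    dst
}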
/// Concatenate a and b into a 128-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 64 bytes (16 elements) in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_alignr_epi32&expand=246)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask_alignr_epi32&expand=246)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
@@ -24551,14 +24551,14 @@ pub unsafe fn _mm512_mask_alignr_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_alignr_epi32::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x16(), src.as_i32x16()))
}
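// [Editor's note: illustrative sketch, not part of this patch.] The
// writemask pattern used by every `_mask_`/`_maskz_` variant in this file:
// lane i of the result comes from the computed value when bit i of `k` is
// set, and from `src` otherwise (the `_maskz_` variants substitute zero for
// `src`). Scalar model for the 16-lane case:
fn select_bitmask_model(k: u16, computed: [i32; 16], src: [i32; 16]) -> [i32; 16] {
    let mut dst = [0i32; 16];
    for i in 0..16 {
        dst[i] = if (k >> i) & 1 == 1 { computed[i] } else { src[i] };
    }
    dst
}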
/// Concatenate a and b into a 128-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 64 bytes (16 elements) in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_alignr_epi32&expand=247)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_maskz_alignr_epi32&expand=247)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
@@ -24568,7 +24568,7 @@ pub unsafe fn _mm512_maskz_alignr_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_alignr_epi32::<IMM8>(a, b);
let zero = _mm512_setzero_si512().as_i32x16();
transmute(simd_select_bitmask(k, r.as_i32x16(), zero))
@@ -24576,40 +24576,40 @@ pub unsafe fn _mm512_maskz_alignr_epi32<const IMM8: i32>(
/// Concatenate a and b into a 64-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 32 bytes (8 elements) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_alignr_epi32&expand=242)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_alignr_epi32&expand=242)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_alignr_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x8();
let b = b.as_i32x8();
let imm8: i32 = IMM8 % 16;
let r: i32x8 = match imm8 {
- 0 => simd_shuffle8!(a, b, [8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle8!(a, b, [9, 10, 11, 12, 13, 14, 15, 0]),
- 2 => simd_shuffle8!(a, b, [10, 11, 12, 13, 14, 15, 0, 1]),
- 3 => simd_shuffle8!(a, b, [11, 12, 13, 14, 15, 0, 1, 2]),
- 4 => simd_shuffle8!(a, b, [12, 13, 14, 15, 0, 1, 2, 3]),
- 5 => simd_shuffle8!(a, b, [13, 14, 15, 0, 1, 2, 3, 4]),
- 6 => simd_shuffle8!(a, b, [14, 15, 0, 1, 2, 3, 4, 5]),
- 7 => simd_shuffle8!(a, b, [15, 0, 1, 2, 3, 4, 5, 6]),
- 8 => simd_shuffle8!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
- 9 => simd_shuffle8!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
- 10 => simd_shuffle8!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
- 11 => simd_shuffle8!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
- 12 => simd_shuffle8!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
- 13 => simd_shuffle8!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
- 14 => simd_shuffle8!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
- _ => simd_shuffle8!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
+ 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 0]),
+ 2 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 0, 1]),
+ 3 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 0, 1, 2]),
+ 4 => simd_shuffle!(a, b, [12, 13, 14, 15, 0, 1, 2, 3]),
+ 5 => simd_shuffle!(a, b, [13, 14, 15, 0, 1, 2, 3, 4]),
+ 6 => simd_shuffle!(a, b, [14, 15, 0, 1, 2, 3, 4, 5]),
+ 7 => simd_shuffle!(a, b, [15, 0, 1, 2, 3, 4, 5, 6]),
+ 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]),
+ 9 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]),
+ 10 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]),
+ 11 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]),
+ 12 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]),
+ 13 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]),
+ 14 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]),
+ _ => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]),
};
transmute(r)
}
/// Concatenate a and b into a 64-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 32 bytes (8 elements) in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_alignr_epi32&expand=243)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_alignr_epi32&expand=243)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
@@ -24620,14 +24620,14 @@ pub unsafe fn _mm256_mask_alignr_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm256_alignr_epi32::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x8(), src.as_i32x8()))
}
/// Concatenate a and b into a 64-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 32 bytes (8 elements) in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_alignr_epi32&expand=244)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_alignr_epi32&expand=244)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
@@ -24637,7 +24637,7 @@ pub unsafe fn _mm256_maskz_alignr_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm256_alignr_epi32::<IMM8>(a, b);
let zero = _mm256_setzero_si256().as_i32x8();
transmute(simd_select_bitmask(k, r.as_i32x8(), zero))
@@ -24645,32 +24645,32 @@ pub unsafe fn _mm256_maskz_alignr_epi32<const IMM8: i32>(
/// Concatenate a and b into a 32-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 16 bytes (4 elements) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi32&expand=239)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi32&expand=239)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignd
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_alignr_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
let b = b.as_i32x4();
let imm8: i32 = IMM8 % 8;
let r: i32x4 = match imm8 {
- 0 => simd_shuffle4!(a, b, [4, 5, 6, 7]),
- 1 => simd_shuffle4!(a, b, [5, 6, 7, 0]),
- 2 => simd_shuffle4!(a, b, [6, 7, 0, 1]),
- 3 => simd_shuffle4!(a, b, [7, 0, 1, 2]),
- 4 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 5 => simd_shuffle4!(a, b, [1, 2, 3, 0]),
- 6 => simd_shuffle4!(a, b, [2, 3, 0, 1]),
- _ => simd_shuffle4!(a, b, [3, 0, 1, 2]),
+ 0 => simd_shuffle!(a, b, [4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [5, 6, 7, 0]),
+ 2 => simd_shuffle!(a, b, [6, 7, 0, 1]),
+ 3 => simd_shuffle!(a, b, [7, 0, 1, 2]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 5 => simd_shuffle!(a, b, [1, 2, 3, 0]),
+ 6 => simd_shuffle!(a, b, [2, 3, 0, 1]),
+ _ => simd_shuffle!(a, b, [3, 0, 1, 2]),
};
transmute(r)
}
/// Concatenate a and b into a 32-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 16 bytes (4 elements) in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_alignr_epi32&expand=240)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_alignr_epi32&expand=240)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
@@ -24681,14 +24681,14 @@ pub unsafe fn _mm_mask_alignr_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi32::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i32x4(), src.as_i32x4()))
}
/// Concatenate a and b into a 32-byte immediate result, shift the result right by imm8 32-bit elements, and store the low 16 bytes (4 elements) in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_alignr_epi32&expand=241)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_alignr_epi32&expand=241)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignd, IMM8 = 1))]
@@ -24698,7 +24698,7 @@ pub unsafe fn _mm_maskz_alignr_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi32::<IMM8>(a, b);
let zero = _mm_setzero_si128().as_i32x4();
transmute(simd_select_bitmask(k, r.as_i32x4(), zero))
@@ -24706,30 +24706,30 @@ pub unsafe fn _mm_maskz_alignr_epi32<const IMM8: i32>(
/// Concatenate a and b into a 128-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 64 bytes (8 elements) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_alignr_epi64&expand=254)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_alignr_epi64&expand=254)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_alignr_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8: i32 = IMM8 % 8;
let r: i64x8 = match imm8 {
- 0 => simd_shuffle8!(a, b, [8, 9, 10, 11, 12, 13, 14, 15]),
- 1 => simd_shuffle8!(a, b, [9, 10, 11, 12, 13, 14, 15, 0]),
- 2 => simd_shuffle8!(a, b, [10, 11, 12, 13, 14, 15, 0, 1]),
- 3 => simd_shuffle8!(a, b, [11, 12, 13, 14, 15, 0, 1, 2]),
- 4 => simd_shuffle8!(a, b, [12, 13, 14, 15, 0, 1, 2, 3]),
- 5 => simd_shuffle8!(a, b, [13, 14, 15, 0, 1, 2, 3, 4]),
- 6 => simd_shuffle8!(a, b, [14, 15, 0, 1, 2, 3, 4, 5]),
- _ => simd_shuffle8!(a, b, [15, 0, 1, 2, 3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15]),
+ 1 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 0]),
+ 2 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 0, 1]),
+ 3 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 0, 1, 2]),
+ 4 => simd_shuffle!(a, b, [12, 13, 14, 15, 0, 1, 2, 3]),
+ 5 => simd_shuffle!(a, b, [13, 14, 15, 0, 1, 2, 3, 4]),
+ 6 => simd_shuffle!(a, b, [14, 15, 0, 1, 2, 3, 4, 5]),
+ _ => simd_shuffle!(a, b, [15, 0, 1, 2, 3, 4, 5, 6]),
};
transmute(r)
}
/// Concatenate a and b into a 128-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 64 bytes (8 elements) in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask_alignr_epi64&expand=255)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask_alignr_epi64&expand=255)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
@@ -24740,14 +24740,14 @@ pub unsafe fn _mm512_mask_alignr_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_alignr_epi64::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i64x8(), src.as_i64x8()))
}
/// Concatenate a and b into a 128-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 64 bytes (8 elements) in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_maskz_alignr_epi64&expand=256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_maskz_alignr_epi64&expand=256)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
@@ -24757,7 +24757,7 @@ pub unsafe fn _mm512_maskz_alignr_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm512_alignr_epi64::<IMM8>(a, b);
let zero = _mm512_setzero_si512().as_i64x8();
transmute(simd_select_bitmask(k, r.as_i64x8(), zero))
@@ -24765,30 +24765,30 @@ pub unsafe fn _mm512_maskz_alignr_epi64<const IMM8: i32>(
/// Concatenate a and b into a 64-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 32 bytes (4 elements) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_alignr_epi64&expand=251)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_alignr_epi64&expand=251)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_alignr_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8: i32 = IMM8 % 8;
let r: i64x4 = match imm8 {
- 0 => simd_shuffle4!(a, b, [4, 5, 6, 7]),
- 1 => simd_shuffle4!(a, b, [5, 6, 7, 0]),
- 2 => simd_shuffle4!(a, b, [6, 7, 0, 1]),
- 3 => simd_shuffle4!(a, b, [7, 0, 1, 2]),
- 4 => simd_shuffle4!(a, b, [0, 1, 2, 3]),
- 5 => simd_shuffle4!(a, b, [1, 2, 3, 4]),
- 6 => simd_shuffle4!(a, b, [2, 3, 4, 5]),
- _ => simd_shuffle4!(a, b, [3, 4, 5, 6]),
+ 0 => simd_shuffle!(a, b, [4, 5, 6, 7]),
+ 1 => simd_shuffle!(a, b, [5, 6, 7, 0]),
+ 2 => simd_shuffle!(a, b, [6, 7, 0, 1]),
+ 3 => simd_shuffle!(a, b, [7, 0, 1, 2]),
+ 4 => simd_shuffle!(a, b, [0, 1, 2, 3]),
+ 5 => simd_shuffle!(a, b, [1, 2, 3, 4]),
+ 6 => simd_shuffle!(a, b, [2, 3, 4, 5]),
+ _ => simd_shuffle!(a, b, [3, 4, 5, 6]),
};
transmute(r)
}
/// Concatenate a and b into a 64-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 32 bytes (4 elements) in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_alignr_epi64&expand=252)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_alignr_epi64&expand=252)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
@@ -24799,14 +24799,14 @@ pub unsafe fn _mm256_mask_alignr_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm256_alignr_epi64::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i64x4(), src.as_i64x4()))
}
/// Concatenate a and b into a 64-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 32 bytes (4 elements) in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_alignr_epi64&expand=253)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_alignr_epi64&expand=253)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
@@ -24816,7 +24816,7 @@ pub unsafe fn _mm256_maskz_alignr_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm256_alignr_epi64::<IMM8>(a, b);
let zero = _mm256_setzero_si256().as_i64x4();
transmute(simd_select_bitmask(k, r.as_i64x4(), zero))
@@ -24824,26 +24824,26 @@ pub unsafe fn _mm256_maskz_alignr_epi64<const IMM8: i32>(
/// Concatenate a and b into a 32-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 16 bytes (2 elements) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi64&expand=248)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi64&expand=248)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 1))] //should be valignq
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_alignr_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8: i32 = IMM8 % 4;
let r: i64x2 = match imm8 {
- 0 => simd_shuffle2!(a, b, [2, 3]),
- 1 => simd_shuffle2!(a, b, [3, 0]),
- 2 => simd_shuffle2!(a, b, [0, 1]),
- _ => simd_shuffle2!(a, b, [1, 2]),
+ 0 => simd_shuffle!(a, b, [2, 3]),
+ 1 => simd_shuffle!(a, b, [3, 0]),
+ 2 => simd_shuffle!(a, b, [0, 1]),
+ _ => simd_shuffle!(a, b, [1, 2]),
};
transmute(r)
}
/// Concatenate a and b into a 32-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 16 bytes (2 elements) in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_alignr_epi64&expand=249)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_alignr_epi64&expand=249)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
@@ -24854,14 +24854,14 @@ pub unsafe fn _mm_mask_alignr_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi64::<IMM8>(a, b);
transmute(simd_select_bitmask(k, r.as_i64x2(), src.as_i64x2()))
}
/// Concatenate a and b into a 32-byte immediate result, shift the result right by imm8 64-bit elements, and store the low 16 bytes (2 elements) in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_alignr_epi64&expand=250)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_alignr_epi64&expand=250)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(valignq, IMM8 = 1))]
@@ -24871,7 +24871,7 @@ pub unsafe fn _mm_maskz_alignr_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let r = _mm_alignr_epi64::<IMM8>(a, b);
let zero = _mm_setzero_si128().as_i64x2();
transmute(simd_select_bitmask(k, r.as_i64x2(), zero))
@@ -24879,7 +24879,7 @@ pub unsafe fn _mm_maskz_alignr_epi64<const IMM8: i32>(
/// Compute the bitwise AND of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_and_epi32&expand=272)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_and_epi32&expand=272)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))] //should be vpandd, but generate vpandq
@@ -24889,7 +24889,7 @@ pub unsafe fn _mm512_and_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Performs element-by-element bitwise AND between packed 32-bit integer elements of a and b, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_and_epi32&expand=273)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_and_epi32&expand=273)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandd))]
@@ -24900,7 +24900,7 @@ pub unsafe fn _mm512_mask_and_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Compute the bitwise AND of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_and_epi32&expand=274)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_and_epi32&expand=274)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandd))]
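// [Editor's note: hypothetical usage sketch, not part of this patch.] How a
// caller would exercise the mask/maskz pair above; with k = 0b1111 only the
// low four lanes receive `a & b`, the rest are copied from `src` or zeroed.
#[cfg(target_arch = "x86_64")]
fn and_mask_demo() {
    use std::arch::x86_64::*;
    if is_x86_feature_detected!("avx512f") {
        unsafe {
            let a = _mm512_set1_epi32(-1);
            let b = _mm512_set1_epi32(0x0F);
            let src = _mm512_set1_epi32(7);
            // lanes 0..4 become 0x0F, lanes 4..16 keep src's value (7)
            let _merged = _mm512_mask_and_epi32(src, 0b1111, a, b);
            // lanes 0..4 become 0x0F, lanes 4..16 become 0
            let _zeroed = _mm512_maskz_and_epi32(0b1111, a, b);
        }
    }
}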
@@ -24912,7 +24912,7 @@ pub unsafe fn _mm512_maskz_and_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Performs element-by-element bitwise AND between packed 32-bit integer elements of a and b, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_and_epi32&expand=270)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_and_epi32&expand=270)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandd))]
@@ -24923,7 +24923,7 @@ pub unsafe fn _mm256_mask_and_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compute the bitwise AND of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_and_epi32&expand=271)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_and_epi32&expand=271)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandd))]
@@ -24935,7 +24935,7 @@ pub unsafe fn _mm256_maskz_and_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Performs element-by-element bitwise AND between packed 32-bit integer elements of a and b, storing the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_and_epi32&expand=268)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_and_epi32&expand=268)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandd))]
@@ -24946,7 +24946,7 @@ pub unsafe fn _mm_mask_and_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compute the bitwise AND of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_and_epi32&expand=269)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_and_epi32&expand=269)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandd))]
@@ -24958,7 +24958,7 @@ pub unsafe fn _mm_maskz_and_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compute the bitwise AND of 512 bits (composed of packed 64-bit integers) in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_and_epi64&expand=279)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_and_epi64&expand=279)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -24968,7 +24968,7 @@ pub unsafe fn _mm512_and_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise AND of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_and_epi64&expand=280)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_and_epi64&expand=280)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -24979,7 +24979,7 @@ pub unsafe fn _mm512_mask_and_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Compute the bitwise AND of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_and_epi64&expand=281)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_and_epi64&expand=281)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -24991,7 +24991,7 @@ pub unsafe fn _mm512_maskz_and_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Compute the bitwise AND of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_and_epi64&expand=277)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_and_epi64&expand=277)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -25002,7 +25002,7 @@ pub unsafe fn _mm256_mask_and_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compute the bitwise AND of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_and_epi64&expand=278)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_and_epi64&expand=278)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -25014,7 +25014,7 @@ pub unsafe fn _mm256_maskz_and_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compute the bitwise AND of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_and_epi64&expand=275)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_and_epi64&expand=275)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -25025,7 +25025,7 @@ pub unsafe fn _mm_mask_and_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compute the bitwise AND of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_and_epi64&expand=276)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_and_epi64&expand=276)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -25037,7 +25037,7 @@ pub unsafe fn _mm_maskz_and_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compute the bitwise AND of 512 bits (representing integer data) in a and b, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_and_si512&expand=302)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_and_si512&expand=302)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandq))]
@@ -25047,7 +25047,7 @@ pub unsafe fn _mm512_and_si512(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_or_epi32&expand=4042)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_or_epi32&expand=4042)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25057,7 +25057,7 @@ pub unsafe fn _mm512_or_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_or_epi32&expand=4040)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_or_epi32&expand=4040)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpord))]
@@ -25068,7 +25068,7 @@ pub unsafe fn _mm512_mask_or_epi32(src: __m512i, k: __mmask16, a: __m512i, b: __
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_or_epi32&expand=4041)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_or_epi32&expand=4041)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpord))]
@@ -25080,7 +25080,7 @@ pub unsafe fn _mm512_maskz_or_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __m
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_epi32&expand=4039)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_epi32&expand=4039)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vor))] //should be vpord
@@ -25090,7 +25090,7 @@ pub unsafe fn _mm256_or_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_or_epi32&expand=4037)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_or_epi32&expand=4037)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpord))]
@@ -25101,7 +25101,7 @@ pub unsafe fn _mm256_mask_or_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __m
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_or_epi32&expand=4038)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_or_epi32&expand=4038)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpord))]
@@ -25113,7 +25113,7 @@ pub unsafe fn _mm256_maskz_or_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m2
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_or_epi32&expand=4036)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_epi32&expand=4036)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vor))] //should be vpord
@@ -25123,7 +25123,7 @@ pub unsafe fn _mm_or_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_or_epi32&expand=4034)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_or_epi32&expand=4034)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpord))]
@@ -25134,7 +25134,7 @@ pub unsafe fn _mm_mask_or_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m128
/// Compute the bitwise OR of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_or_epi32&expand=4035)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_or_epi32&expand=4035)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpord))]
@@ -25146,7 +25146,7 @@ pub unsafe fn _mm_maskz_or_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128i
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_or_epi64&expand=4051)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_or_epi64&expand=4051)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25156,7 +25156,7 @@ pub unsafe fn _mm512_or_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_or_epi64&expand=4049)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_or_epi64&expand=4049)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25167,7 +25167,7 @@ pub unsafe fn _mm512_mask_or_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __m
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_or_epi64&expand=4050)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_or_epi64&expand=4050)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25179,7 +25179,7 @@ pub unsafe fn _mm512_maskz_or_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m5
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_epi64&expand=4048)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_or_epi64&expand=4048)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vor))] //should be vporq
@@ -25189,7 +25189,7 @@ pub unsafe fn _mm256_or_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_or_epi64&expand=4046)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_or_epi64&expand=4046)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25200,7 +25200,7 @@ pub unsafe fn _mm256_mask_or_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __m
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_or_epi64&expand=4047)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_or_epi64&expand=4047)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25212,7 +25212,7 @@ pub unsafe fn _mm256_maskz_or_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m2
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_or_epi64&expand=4045)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_epi64&expand=4045)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vor))] //should be vporq
@@ -25222,7 +25222,7 @@ pub unsafe fn _mm_or_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_or_epi64&expand=4043)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_or_epi64&expand=4043)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25233,7 +25233,7 @@ pub unsafe fn _mm_mask_or_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m128
/// Compute the bitwise OR of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_or_epi64&expand=4044)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_or_epi64&expand=4044)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25245,7 +25245,7 @@ pub unsafe fn _mm_maskz_or_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128i
/// Compute the bitwise OR of 512 bits (representing integer data) in a and b, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_or_si512&expand=4072)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_or_si512&expand=4072)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vporq))]
@@ -25255,7 +25255,7 @@ pub unsafe fn _mm512_or_si512(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_xor_epi32&expand=6142)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_xor_epi32&expand=6142)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxorq))] //should be vpxord
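// [Editor's note: illustrative sketch, not part of this patch.] The OR and
// XOR families follow exactly the lane/mask pattern shown for AND above. One
// practical corollary of lane-wise XOR: XORing a register with itself zeroes
// every lane, matching _mm512_setzero_si512().
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn zero_via_xor(a: std::arch::x86_64::__m512i) -> std::arch::x86_64::__m512i {
    std::arch::x86_64::_mm512_xor_epi32(a, a) // a ^ a == 0 in every lane
}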
@@ -25265,7 +25265,7 @@ pub unsafe fn _mm512_xor_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_xor_epi32&expand=6140)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_xor_epi32&expand=6140)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxord))]
@@ -25276,7 +25276,7 @@ pub unsafe fn _mm512_mask_xor_epi32(src: __m512i, k: __mmask16, a: __m512i, b: _
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_xor_epi32&expand=6141)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_xor_epi32&expand=6141)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxord))]
@@ -25288,7 +25288,7 @@ pub unsafe fn _mm512_maskz_xor_epi32(k: __mmask16, a: __m512i, b: __m512i) -> __
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_epi32&expand=6139)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_epi32&expand=6139)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vxor))] //should be vpxord
@@ -25298,7 +25298,7 @@ pub unsafe fn _mm256_xor_epi32(a: __m256i, b: __m256i) -> __m256i {
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_xor_epi32&expand=6137)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_xor_epi32&expand=6137)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxord))]
@@ -25309,7 +25309,7 @@ pub unsafe fn _mm256_mask_xor_epi32(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_xor_epi32&expand=6138)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_xor_epi32&expand=6138)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxord))]
@@ -25321,7 +25321,7 @@ pub unsafe fn _mm256_maskz_xor_epi32(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_epi32&expand=6136)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_epi32&expand=6136)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vxor))] //should be vpxord
@@ -25331,7 +25331,7 @@ pub unsafe fn _mm_xor_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_xor_epi32&expand=6134)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_xor_epi32&expand=6134)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxord))]
@@ -25342,7 +25342,7 @@ pub unsafe fn _mm_mask_xor_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compute the bitwise XOR of packed 32-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_xor_epi32&expand=6135)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_xor_epi32&expand=6135)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxord))]
@@ -25354,7 +25354,7 @@ pub unsafe fn _mm_maskz_xor_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_xor_epi64&expand=6151)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_xor_epi64&expand=6151)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25364,7 +25364,7 @@ pub unsafe fn _mm512_xor_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_xor_epi64&expand=6149)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_xor_epi64&expand=6149)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25375,7 +25375,7 @@ pub unsafe fn _mm512_mask_xor_epi64(src: __m512i, k: __mmask8, a: __m512i, b: __
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_xor_epi64&expand=6150)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_xor_epi64&expand=6150)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25387,7 +25387,7 @@ pub unsafe fn _mm512_maskz_xor_epi64(k: __mmask8, a: __m512i, b: __m512i) -> __m
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_epi64&expand=6148)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_xor_epi64&expand=6148)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vxor))] //should be vpxorq
@@ -25397,7 +25397,7 @@ pub unsafe fn _mm256_xor_epi64(a: __m256i, b: __m256i) -> __m256i {
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_xor_epi64&expand=6146)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_xor_epi64&expand=6146)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25408,7 +25408,7 @@ pub unsafe fn _mm256_mask_xor_epi64(src: __m256i, k: __mmask8, a: __m256i, b: __
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_xor_epi64&expand=6147)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_xor_epi64&expand=6147)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25420,7 +25420,7 @@ pub unsafe fn _mm256_maskz_xor_epi64(k: __mmask8, a: __m256i, b: __m256i) -> __m
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_epi64&expand=6145)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_epi64&expand=6145)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vxor))] //should be vpxorq
@@ -25430,7 +25430,7 @@ pub unsafe fn _mm_xor_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_xor_epi64&expand=6143)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_xor_epi64&expand=6143)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25441,7 +25441,7 @@ pub unsafe fn _mm_mask_xor_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __m12
/// Compute the bitwise XOR of packed 64-bit integers in a and b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_xor_epi64&expand=6144)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_xor_epi64&expand=6144)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25453,7 +25453,7 @@ pub unsafe fn _mm_maskz_xor_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m128
/// Compute the bitwise XOR of 512 bits (representing integer data) in a and b, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_xor_si512&expand=6172)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_xor_si512&expand=6172)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpxorq))]
@@ -25463,7 +25463,7 @@ pub unsafe fn _mm512_xor_si512(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_andnot_epi32&expand=310)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_andnot_epi32&expand=310)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnq))] //should be vpandnd
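// [Editor's note: illustrative sketch, not part of this patch.] `vpandn`
// applies the NOT to the *first* operand: dst = (!a) & b, not a & (!b). A
// single-lane scalar model makes the operand order explicit:
fn andnot_lane_model(a: i32, b: i32) -> i32 {
    !a & b
}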
@@ -25473,7 +25473,7 @@ pub unsafe fn _mm512_andnot_epi32(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_andnot_epi32&expand=311)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_andnot_epi32&expand=311)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnd))]
@@ -25489,7 +25489,7 @@ pub unsafe fn _mm512_mask_andnot_epi32(
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_andnot_epi32&expand=312)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_andnot_epi32&expand=312)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnd))]
@@ -25501,7 +25501,7 @@ pub unsafe fn _mm512_maskz_andnot_epi32(k: __mmask16, a: __m512i, b: __m512i) ->
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_andnot_epi32&expand=308)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_andnot_epi32&expand=308)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnd))]
@@ -25518,7 +25518,7 @@ pub unsafe fn _mm256_mask_andnot_epi32(
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_andnot_epi32&expand=309)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_andnot_epi32&expand=309)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnd))]
@@ -25531,7 +25531,7 @@ pub unsafe fn _mm256_maskz_andnot_epi32(k: __mmask8, a: __m256i, b: __m256i) ->
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_andnot_epi32&expand=306)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_andnot_epi32&expand=306)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnd))]
@@ -25543,7 +25543,7 @@ pub unsafe fn _mm_mask_andnot_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __
/// Compute the bitwise NOT of packed 32-bit integers in a and then AND with b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_andnot_epi32&expand=307)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_andnot_epi32&expand=307)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnd))]
@@ -25556,7 +25556,7 @@ pub unsafe fn _mm_maskz_andnot_epi32(k: __mmask8, a: __m128i, b: __m128i) -> __m
/// Compute the bitwise NOT of 512 bits (composed of packed 64-bit integers) in a and then AND with b, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_andnot_epi64&expand=317)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_andnot_epi64&expand=317)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25566,7 +25566,7 @@ pub unsafe fn _mm512_andnot_epi64(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise NOT of packed 64-bit integers in a and then AND with b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_andnot_epi64&expand=318)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_andnot_epi64&expand=318)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25582,7 +25582,7 @@ pub unsafe fn _mm512_mask_andnot_epi64(
/// Compute the bitwise NOT of packed 64-bit integers in a and then AND with b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_andnot_epi64&expand=319)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_andnot_epi64&expand=319)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25594,7 +25594,7 @@ pub unsafe fn _mm512_maskz_andnot_epi64(k: __mmask8, a: __m512i, b: __m512i) ->
/// Compute the bitwise NOT of packed 64-bit integers in a and then AND with b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_andnot_epi64&expand=315)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_andnot_epi64&expand=315)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25611,7 +25611,7 @@ pub unsafe fn _mm256_mask_andnot_epi64(
/// Compute the bitwise NOT of packed 64-bit integers in a and then AND with b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_andnot_epi64&expand=316)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_andnot_epi64&expand=316)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25624,7 +25624,7 @@ pub unsafe fn _mm256_maskz_andnot_epi64(k: __mmask8, a: __m256i, b: __m256i) ->
/// Compute the bitwise NOT of packed 64-bit integers in a and then AND with b, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_andnot_epi64&expand=313)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_andnot_epi64&expand=313)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25636,7 +25636,7 @@ pub unsafe fn _mm_mask_andnot_epi64(src: __m128i, k: __mmask8, a: __m128i, b: __
/// Compute the bitwise NOT of packed 64-bit integers in a and then AND with b, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_andnot_epi64&expand=314)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_andnot_epi64&expand=314)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25649,7 +25649,7 @@ pub unsafe fn _mm_maskz_andnot_epi64(k: __mmask8, a: __m128i, b: __m128i) -> __m
/// Compute the bitwise NOT of 512 bits (representing integer data) in a and then AND with b, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_andnot_si512&expand=340)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_andnot_si512&expand=340)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpandnq))]
@@ -25659,7 +25659,7 @@ pub unsafe fn _mm512_andnot_si512(a: __m512i, b: __m512i) -> __m512i {
/// Compute the bitwise AND of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=kand_mask16&expand=3212)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kand_mask16&expand=3212)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw
@@ -25669,7 +25669,7 @@ pub unsafe fn _kand_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise AND of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kand&expand=3210)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kand&expand=3210)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(and))] // generate normal and code instead of kandw
@@ -25679,7 +25679,7 @@ pub unsafe fn _mm512_kand(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise OR of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=kor_mask16&expand=3239)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kor_mask16&expand=3239)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw
@@ -25689,7 +25689,7 @@ pub unsafe fn _kor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise OR of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kor&expand=3237)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kor&expand=3237)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(or))] // generate normal or code instead of korw
@@ -25699,7 +25699,7 @@ pub unsafe fn _mm512_kor(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise XOR of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=kxor_mask16&expand=3291)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kxor_mask16&expand=3291)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(xor))] // generate normal xor code instead of kxorw
@@ -25709,7 +25709,7 @@ pub unsafe fn _kxor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise XOR of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kxor&expand=3289)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kxor&expand=3289)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(xor))] // generate normal xor code instead of kxorw
@@ -25719,7 +25719,7 @@ pub unsafe fn _mm512_kxor(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise NOT of 16-bit mask a, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=knot_mask16&expand=3233)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=knot_mask16&expand=3233)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _knot_mask16(a: __mmask16) -> __mmask16 {
@@ -25728,7 +25728,7 @@ pub unsafe fn _knot_mask16(a: __mmask16) -> __mmask16 {
/// Compute the bitwise NOT of 16-bit mask a, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_knot&expand=3231)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_knot&expand=3231)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_knot(a: __mmask16) -> __mmask16 {
@@ -25737,7 +25737,7 @@ pub unsafe fn _mm512_knot(a: __mmask16) -> __mmask16 {
/// Compute the bitwise NOT of 16-bit masks a and then AND with b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=kandn_mask16&expand=3218)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kandn_mask16&expand=3218)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(not))] // generate normal and, not code instead of kandnw
@@ -25747,7 +25747,7 @@ pub unsafe fn _kandn_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise NOT of 16-bit masks a and then AND with b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kandn&expand=3216)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kandn&expand=3216)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(not))] // generate normal and, not code instead of kandnw
@@ -25757,7 +25757,7 @@ pub unsafe fn _mm512_kandn(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise XNOR of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=kxnor_mask16&expand=3285)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=kxnor_mask16&expand=3285)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(xor))] // generate normal xor, not code instead of kxnorw
@@ -25767,7 +25767,7 @@ pub unsafe fn _kxnor_mask16(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Compute the bitwise XNOR of 16-bit masks a and b, and store the result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kxnor&expand=3283)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kxnor&expand=3283)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(xor))] // generate normal xor, not code instead of kxnorw
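// An __mmask16 is a plain 16-bit integer, so the k-register operations above
// model as ordinary bitwise ops on u16 (assumption: scalar sketch only):
fn kand_model(a: u16, b: u16) -> u16 { a & b }
fn kor_model(a: u16, b: u16) -> u16 { a | b }
fn kxor_model(a: u16, b: u16) -> u16 { a ^ b }
fn knot_model(a: u16) -> u16 { !a }
fn kandn_model(a: u16, b: u16) -> u16 { !a & b } // NOT of a, then AND with b
fn kxnor_model(a: u16, b: u16) -> u16 { !(a ^ b) }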
@@ -25777,7 +25777,7 @@ pub unsafe fn _mm512_kxnor(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Copy 16-bit mask a to k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm512_kmov&expand=3228)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm512_kmov&expand=3228)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(mov))] // generate normal mov code instead of kmovw
@@ -25788,7 +25788,7 @@ pub unsafe fn _mm512_kmov(a: __mmask16) -> __mmask16 {
/// Converts the integer mask into a 16-bit bitmask, storing the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_int2mask&expand=3189)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_int2mask&expand=3189)
#[inline]
#[target_feature(enable = "avx512f")] // generate normal and code instead of kmovw
pub unsafe fn _mm512_int2mask(mask: i32) -> __mmask16 {
@@ -25798,7 +25798,7 @@ pub unsafe fn _mm512_int2mask(mask: i32) -> __mmask16 {
/// Converts the bit mask k1 into an integer value, storing the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_mask2int&expand=3544)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_mask2int&expand=3544)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(mov))] // generate normal mov code instead of kmovw
@@ -25809,7 +25809,7 @@ pub unsafe fn _mm512_mask2int(k1: __mmask16) -> i32 {
/// Unpack and interleave 8 bits from masks a and b, and store the 16-bit result in k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kunpackb&expand=3280)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kunpackb&expand=3280)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(mov))] // generate normal mov code instead of kunpckbw
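// A scalar model of the unpack (assumption: sketch following Intel's KUNPCKBW
// pseudocode, treating `a` as SRC1): the low byte of the result comes from b,
// the high byte from a.
fn kunpackb_model(a: u16, b: u16) -> u16 {
    ((a & 0x00ff) << 8) | (b & 0x00ff)
}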
@@ -25821,7 +25821,7 @@ pub unsafe fn _mm512_kunpackb(a: __mmask16, b: __mmask16) -> __mmask16 {
/// Performs a bitwise OR between masks a and b. Returns the CF flag: 1 if the result consists of all 1's, otherwise 0.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_kortestc&expand=3247)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=512_kortestc&expand=3247)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(cmp))] // generate normal cmp code instead of kortestw
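// A scalar model (assumption: sketch only): the "c" in kortestc is the CF
// flag, so the function reports whether the OR of the two masks is all ones.
fn kortestc_model(a: u16, b: u16) -> i32 {
    ((a | b) == u16::MAX) as i32
}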
@@ -25836,7 +25836,7 @@ pub unsafe fn _mm512_kortestc(a: __mmask16, b: __mmask16) -> i32 {
/// Compute the bitwise AND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_test_epi32_mask&expand=5890)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi32_mask&expand=5890)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestmd))]
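// A scalar model of vptestmd (assumption: illustrative sketch only): lane i of
// the result mask is set when `a[i] & b[i]` is non-zero.
fn test_epi32_mask_model(a: [u32; 16], b: [u32; 16]) -> u16 {
    let mut k = 0u16;
    for i in 0..16 {
        if a[i] & b[i] != 0 {
            k |= 1u16 << i;
        }
    }
    k
}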
@@ -25848,7 +25848,7 @@ pub unsafe fn _mm512_test_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compute the bitwise AND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_test_epi32_mask&expand=5889)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi32_mask&expand=5889)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestmd))]
@@ -25860,7 +25860,7 @@ pub unsafe fn _mm512_mask_test_epi32_mask(k: __mmask16, a: __m512i, b: __m512i)
/// Compute the bitwise AND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_test_epi32_mask&expand=5888)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi32_mask&expand=5888)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmd))]
@@ -25872,7 +25872,7 @@ pub unsafe fn _mm256_test_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compute the bitwise AND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_test_epi32_mask&expand=5887)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi32_mask&expand=5887)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmd))]
@@ -25884,7 +25884,7 @@ pub unsafe fn _mm256_mask_test_epi32_mask(k: __mmask8, a: __m256i, b: __m256i) -
/// Compute the bitwise AND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_epi32_mask&expand=5886)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi32_mask&expand=5886)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmd))]
@@ -25896,7 +25896,7 @@ pub unsafe fn _mm_test_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compute the bitwise AND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_test_epi32_mask&expand=5885)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi32_mask&expand=5885)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmd))]
@@ -25908,7 +25908,7 @@ pub unsafe fn _mm_mask_test_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Compute the bitwise AND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_test_epi64_mask&expand=5896)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_test_epi64_mask&expand=5896)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestmq))]
@@ -25920,7 +25920,7 @@ pub unsafe fn _mm512_test_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compute the bitwise AND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_test_epi64_mask&expand=5895)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_test_epi64_mask&expand=5895)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestmq))]
@@ -25932,7 +25932,7 @@ pub unsafe fn _mm512_mask_test_epi64_mask(k: __mmask8, a: __m512i, b: __m512i) -
/// Compute the bitwise AND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_test_epi64_mask&expand=5894)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_test_epi64_mask&expand=5894)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmq))]
@@ -25944,7 +25944,7 @@ pub unsafe fn _mm256_test_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compute the bitwise AND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_test_epi64_mask&expand=5893)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_test_epi64_mask&expand=5893)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmq))]
@@ -25956,7 +25956,7 @@ pub unsafe fn _mm256_mask_test_epi64_mask(k: __mmask8, a: __m256i, b: __m256i) -
/// Compute the bitwise AND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_epi64_mask&expand=5892)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_epi64_mask&expand=5892)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmq))]
@@ -25968,7 +25968,7 @@ pub unsafe fn _mm_test_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compute the bitwise AND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is non-zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_test_epi64_mask&expand=5891)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_test_epi64_mask&expand=5891)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestmq))]
@@ -25980,7 +25980,7 @@ pub unsafe fn _mm_mask_test_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) -> _
/// Compute the bitwise NAND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_testn_epi32_mask&expand=5921)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi32_mask&expand=5921)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestnmd))]
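// A scalar model of vptestnmd (assumption: illustrative sketch only): testn is
// the complement of test — lane i is set when `a[i] & b[i]` is zero.
fn testn_epi32_mask_model(a: [u32; 16], b: [u32; 16]) -> u16 {
    let mut k = 0u16;
    for i in 0..16 {
        if a[i] & b[i] == 0 {
            k |= 1u16 << i;
        }
    }
    k
}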
@@ -25992,7 +25992,7 @@ pub unsafe fn _mm512_testn_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compute the bitwise NAND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_testn_epi32_mask&expand=5920)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi32_mask&expand=5920)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestnmd))]
@@ -26004,7 +26004,7 @@ pub unsafe fn _mm512_mask_testn_epi32_mask(k: __mmask16, a: __m512i, b: __m512i)
/// Compute the bitwise NAND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testn_epi32_mask&expand=5919)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi32_mask&expand=5919)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmd))]
@@ -26016,7 +26016,7 @@ pub unsafe fn _mm256_testn_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compute the bitwise NAND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_testn_epi32_mask&expand=5918)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi32_mask&expand=5918)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmd))]
@@ -26028,7 +26028,7 @@ pub unsafe fn _mm256_mask_testn_epi32_mask(k: __mmask8, a: __m256i, b: __m256i)
/// Compute the bitwise NAND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testn_epi32_mask&expand=5917)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi32_mask&expand=5917)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmd))]
@@ -26040,7 +26040,7 @@ pub unsafe fn _mm_testn_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compute the bitwise NAND of packed 32-bit integers in a and b, producing intermediate 32-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_testn_epi32_mask&expand=5916)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi32_mask&expand=5916)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmd))]
@@ -26052,7 +26052,7 @@ pub unsafe fn _mm_mask_testn_epi32_mask(k: __mmask8, a: __m128i, b: __m128i) ->
/// Compute the bitwise NAND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_testn_epi64_mask&expand=5927)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_testn_epi64_mask&expand=5927)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestnmq))]
@@ -26064,7 +26064,7 @@ pub unsafe fn _mm512_testn_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compute the bitwise NAND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_testn_epi64_mask&expand=5926)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_testn_epi64_mask&expand=5926)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vptestnmq))]
@@ -26076,7 +26076,7 @@ pub unsafe fn _mm512_mask_testn_epi64_mask(k: __mmask8, a: __m512i, b: __m512i)
/// Compute the bitwise NAND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testn_epi64_mask&expand=5925)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_testn_epi64_mask&expand=5925)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmq))]
@@ -26088,7 +26088,7 @@ pub unsafe fn _mm256_testn_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compute the bitwise NAND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_testn_epi64_mask&expand=5924)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_testn_epi64_mask&expand=5924)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmq))]
@@ -26100,7 +26100,7 @@ pub unsafe fn _mm256_mask_testn_epi64_mask(k: __mmask8, a: __m256i, b: __m256i)
/// Compute the bitwise NAND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testn_epi64_mask&expand=5923)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testn_epi64_mask&expand=5923)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmq))]
@@ -26112,7 +26112,7 @@ pub unsafe fn _mm_testn_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compute the bitwise NAND of packed 64-bit integers in a and b, producing intermediate 64-bit values, and set the corresponding bit in result mask k (subject to writemask k) if the intermediate value is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_testn_epi64_mask&expand=5922)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_testn_epi64_mask&expand=5922)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vptestnmq))]
@@ -26124,7 +26124,7 @@ pub unsafe fn _mm_mask_testn_epi64_mask(k: __mmask8, a: __m128i, b: __m128i) ->
/// Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from a into memory using a non-temporal memory hint. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_stream_ps&expand=5671)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_ps&expand=5671)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovntps))]
@@ -26135,7 +26135,7 @@ pub unsafe fn _mm512_stream_ps(mem_addr: *mut f32, a: __m512) {
/// Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from a into memory using a non-temporal memory hint. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_stream_pd&expand=5667)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_pd&expand=5667)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovntps))] //should be vmovntpd
@@ -26146,7 +26146,7 @@ pub unsafe fn _mm512_stream_pd(mem_addr: *mut f64, a: __m512d) {
/// Store 512-bits of integer data from a into memory using a non-temporal memory hint. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_stream_si512&expand=5675)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_stream_si512&expand=5675)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovntps))] //should be vmovntdq
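// The three stream stores above all require mem_addr to be 64-byte aligned; an
// over-aligned wrapper type is one way to guarantee that statically
// (assumption: illustrative sketch only).
#[repr(align(64))]
struct Aligned64([f32; 16]);
// `&mut buf.0[0] as *mut f32` on an Aligned64 is then always 64-byte aligned.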
@@ -26157,7 +26157,7 @@ pub unsafe fn _mm512_stream_si512(mem_addr: *mut i64, a: __m512i) {
/// Sets packed single-precision (32-bit) floating-point elements in `dst` with the supplied values.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_ps&expand=4931)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_ps&expand=4931)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set_ps(
@@ -26186,7 +26186,7 @@ pub unsafe fn _mm512_set_ps(
/// Sets packed single-precision (32-bit) floating-point elements in `dst` with
/// the supplied values in reverse order.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_ps&expand=5008)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_ps&expand=5008)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr_ps(
@@ -26215,7 +26215,7 @@ pub unsafe fn _mm512_setr_ps(
/// Broadcast 64-bit float `a` to all elements of `dst`.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pd&expand=4975)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_pd&expand=4975)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set1_pd(a: f64) -> __m512d {
@@ -26224,7 +26224,7 @@ pub unsafe fn _mm512_set1_pd(a: f64) -> __m512d {
/// Broadcast 32-bit float `a` to all elements of `dst`.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_ps&expand=4981)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_ps&expand=4981)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set1_ps(a: f32) -> __m512 {
@@ -26233,7 +26233,7 @@ pub unsafe fn _mm512_set1_ps(a: f32) -> __m512 {
/// Sets packed 32-bit integers in `dst` with the supplied values.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_epi32&expand=4908)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_epi32&expand=4908)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set_epi32(
@@ -26261,7 +26261,7 @@ pub unsafe fn _mm512_set_epi32(
/// Broadcast 8-bit integer a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_epi8&expand=4972)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_epi8&expand=4972)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set1_epi8(a: i8) -> __m512i {
@@ -26270,7 +26270,7 @@ pub unsafe fn _mm512_set1_epi8(a: i8) -> __m512i {
/// Broadcast the low packed 16-bit integer from a to all elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_epi16&expand=4944)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_epi16&expand=4944)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set1_epi16(a: i16) -> __m512i {
@@ -26286,7 +26286,7 @@ pub unsafe fn _mm512_set1_epi32(a: i32) -> __m512i {
/// Broadcast 32-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_set1_epi32&expand=4951)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi32&expand=4951)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcastd))]
@@ -26297,7 +26297,7 @@ pub unsafe fn _mm512_mask_set1_epi32(src: __m512i, k: __mmask16, a: i32) -> __m5
/// Broadcast 32-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_set1_epi32&expand=4952)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi32&expand=4952)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcastd))]
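// A scalar model of masked set1 (assumption: illustrative sketch only): `a` is
// broadcast only into lanes whose mask bit is set; the remaining lanes come
// from src (writemask) or are zeroed (zeromask).
fn mask_set1_epi32_model(src: [i32; 16], k: u16, a: i32) -> [i32; 16] {
    let mut dst = src;
    for i in 0..16 {
        if k & (1u16 << i) != 0 {
            dst[i] = a;
        }
    }
    dst
}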
@@ -26309,7 +26309,7 @@ pub unsafe fn _mm512_maskz_set1_epi32(k: __mmask16, a: i32) -> __m512i {
/// Broadcast 32-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_set1_epi32&expand=4948)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi32&expand=4948)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastd))]
@@ -26320,7 +26320,7 @@ pub unsafe fn _mm256_mask_set1_epi32(src: __m256i, k: __mmask8, a: i32) -> __m25
/// Broadcast 32-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_set1_epi32&expand=4949)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi32&expand=4949)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastd))]
@@ -26332,7 +26332,7 @@ pub unsafe fn _mm256_maskz_set1_epi32(k: __mmask8, a: i32) -> __m256i {
/// Broadcast 32-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_set1_epi32&expand=4945)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi32&expand=4945)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastd))]
@@ -26343,7 +26343,7 @@ pub unsafe fn _mm_mask_set1_epi32(src: __m128i, k: __mmask8, a: i32) -> __m128i
/// Broadcast 32-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_set1_epi32&expand=4946)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi32&expand=4946)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastd))]
@@ -26355,7 +26355,7 @@ pub unsafe fn _mm_maskz_set1_epi32(k: __mmask8, a: i32) -> __m128i {
/// Broadcast 64-bit integer `a` to all elements of `dst`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_epi64&expand=4961)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set1_epi64&expand=4961)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set1_epi64(a: i64) -> __m512i {
@@ -26364,7 +26364,7 @@ pub unsafe fn _mm512_set1_epi64(a: i64) -> __m512i {
/// Broadcast 64-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_set1_epi64&expand=4959)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_set1_epi64&expand=4959)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcastq))]
@@ -26375,7 +26375,7 @@ pub unsafe fn _mm512_mask_set1_epi64(src: __m512i, k: __mmask8, a: i64) -> __m51
/// Broadcast 64-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_set1_epi64&expand=4960)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_set1_epi64&expand=4960)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpbroadcastq))]
@@ -26387,7 +26387,7 @@ pub unsafe fn _mm512_maskz_set1_epi64(k: __mmask8, a: i64) -> __m512i {
/// Broadcast 64-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_set1_epi64&expand=4957)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_set1_epi64&expand=4957)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastq))]
@@ -26398,7 +26398,7 @@ pub unsafe fn _mm256_mask_set1_epi64(src: __m256i, k: __mmask8, a: i64) -> __m25
/// Broadcast 64-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_set1_epi64&expand=4958)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_set1_epi64&expand=4958)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastq))]
@@ -26410,7 +26410,7 @@ pub unsafe fn _mm256_maskz_set1_epi64(k: __mmask8, a: i64) -> __m256i {
/// Broadcast 64-bit integer a to all elements of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_set1_epi64&expand=4954)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_set1_epi64&expand=4954)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastq))]
@@ -26421,7 +26421,7 @@ pub unsafe fn _mm_mask_set1_epi64(src: __m128i, k: __mmask8, a: i64) -> __m128i
/// Broadcast 64-bit integer a to all elements of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_set1_epi64&expand=4955)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_set1_epi64&expand=4955)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpbroadcastq))]
@@ -26433,7 +26433,7 @@ pub unsafe fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i {
/// Set packed 64-bit integers in dst with the repeated 4-element sequence.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set4_epi64&expand=4983)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set4_epi64&expand=4983)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
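    // Sketch of the lane order (assumption, per Intel's convention of listing
    // the highest element first): with arguments (d, c, b, a), lane 0 — the
    // lowest element — receives `a`, so dst models as [a, b, c, d, a, b, c, d];
    // the setr4 variant below stores the same four values in reverse lane order.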
@@ -26443,7 +26443,7 @@ pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
/// Set packed 64-bit integers in dst with the repeated 4-element sequence in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr4_epi64&expand=5010)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr4_epi64&expand=5010)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
@@ -26453,7 +26453,7 @@ pub unsafe fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_ps_mask&expand=1074)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_ps_mask&expand=1074)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26463,7 +26463,7 @@ pub unsafe fn _mm512_cmplt_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_ps_mask&expand=1075)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_ps_mask&expand=1075)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26473,7 +26473,7 @@ pub unsafe fn _mm512_mask_cmplt_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b for not-less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpnlt_ps_mask&expand=1154)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnlt_ps_mask&expand=1154)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26483,7 +26483,7 @@ pub unsafe fn _mm512_cmpnlt_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for not-less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpnlt_ps_mask&expand=1155)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnlt_ps_mask&expand=1155)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26493,7 +26493,7 @@ pub unsafe fn _mm512_mask_cmpnlt_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_ps_mask&expand=1013)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_ps_mask&expand=1013)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26503,7 +26503,7 @@ pub unsafe fn _mm512_cmple_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_ps_mask&expand=1014)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_ps_mask&expand=1014)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26513,7 +26513,7 @@ pub unsafe fn _mm512_mask_cmple_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b for not-less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpnle_ps_mask&expand=1146)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnle_ps_mask&expand=1146)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26523,7 +26523,7 @@ pub unsafe fn _mm512_cmpnle_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for not-less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpnle_ps_mask&expand=1147)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnle_ps_mask&expand=1147)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26533,7 +26533,7 @@ pub unsafe fn _mm512_mask_cmpnle_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_ps_mask&expand=828)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_ps_mask&expand=828)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26543,7 +26543,7 @@ pub unsafe fn _mm512_cmpeq_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_ps_mask&expand=829)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_ps_mask&expand=829)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26553,7 +26553,7 @@ pub unsafe fn _mm512_mask_cmpeq_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_ps_mask&expand=1130)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_ps_mask&expand=1130)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26563,7 +26563,7 @@ pub unsafe fn _mm512_cmpneq_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_ps_mask&expand=1131)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_ps_mask&expand=1131)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
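// A scalar model of the predicate pairs above (assumption: sketch only):
// "not-less-than" is the negation of "less-than", so a NaN lane yields 0 for
// cmplt but 1 for cmpnlt; likewise NaN yields 0 for cmpeq and 1 for cmpneq.
fn cmplt_bit(a: f32, b: f32) -> bool { a < b }
fn cmpnlt_bit(a: f32, b: f32) -> bool { !(a < b) } // true for NaN operands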
@@ -26573,13 +26573,13 @@ pub unsafe fn _mm512_mask_cmpneq_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ps_mask&expand=749)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_ps_mask&expand=749)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm512_cmp_ps_mask<const IMM8: i32>(a: __m512, b: __m512) -> __mmask16 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let a = a.as_f32x16();
let b = b.as_f32x16();
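// Illustrative call site (assumption: sketch only; requires the AVX-512F API,
// still unstable at the time of this patch): the predicate is now validated as
// a 5-bit unsigned immediate and is chosen via the const generic, e.g. with
// the _CMP_LT_OQ constant from core::arch::x86_64.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn lanes_lt(a: core::arch::x86_64::__m512, b: core::arch::x86_64::__m512) -> u16 {
    use core::arch::x86_64::{_mm512_cmp_ps_mask, _CMP_LT_OQ};
    _mm512_cmp_ps_mask::<_CMP_LT_OQ>(a, b)
}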
@@ -26589,7 +26589,7 @@ pub unsafe fn _mm512_cmp_ps_mask<const IMM8: i32>(a: __m512, b: __m512) -> __mma
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ps_mask&expand=750)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_ps_mask&expand=750)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -26599,7 +26599,7 @@ pub unsafe fn _mm512_mask_cmp_ps_mask<const IMM8: i32>(
a: __m512,
b: __m512,
) -> __mmask16 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let a = a.as_f32x16();
let b = b.as_f32x16();
let r = vcmpps(a, b, IMM8, k1 as i16, _MM_FROUND_CUR_DIRECTION);
@@ -26608,13 +26608,13 @@ pub unsafe fn _mm512_mask_cmp_ps_mask<const IMM8: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ps_mask&expand=747)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_ps_mask&expand=747)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm256_cmp_ps_mask<const IMM8: i32>(a: __m256, b: __m256) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let a = a.as_f32x8();
let b = b.as_f32x8();
@@ -26624,7 +26624,7 @@ pub unsafe fn _mm256_cmp_ps_mask<const IMM8: i32>(a: __m256, b: __m256) -> __mma
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ps_mask&expand=748)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_ps_mask&expand=748)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -26634,7 +26634,7 @@ pub unsafe fn _mm256_mask_cmp_ps_mask<const IMM8: i32>(
a: __m256,
b: __m256,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let a = a.as_f32x8();
let b = b.as_f32x8();
let r = vcmpps256(a, b, IMM8, k1 as i8);
@@ -26643,13 +26643,13 @@ pub unsafe fn _mm256_mask_cmp_ps_mask<const IMM8: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ps_mask&expand=745)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_ps_mask&expand=745)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_ps_mask<const IMM8: i32>(a: __m128, b: __m128) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -26659,7 +26659,7 @@ pub unsafe fn _mm_cmp_ps_mask<const IMM8: i32>(a: __m128, b: __m128) -> __mmask8
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ps_mask&expand=746)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_ps_mask&expand=746)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -26669,7 +26669,7 @@ pub unsafe fn _mm_mask_cmp_ps_mask<const IMM8: i32>(
a: __m128,
b: __m128,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let a = a.as_f32x4();
let b = b.as_f32x4();
let r = vcmpps128(a, b, IMM8, k1 as i8);
@@ -26679,7 +26679,7 @@ pub unsafe fn _mm_mask_cmp_ps_mask<const IMM8: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ps_mask&expand=753)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_round_ps_mask&expand=753)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -26688,7 +26688,7 @@ pub unsafe fn _mm512_cmp_round_ps_mask<const IMM5: i32, const SAE: i32>(
a: __m512,
b: __m512,
) -> __mmask16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let neg_one = -1;
let a = a.as_f32x16();
@@ -26700,7 +26700,7 @@ pub unsafe fn _mm512_cmp_round_ps_mask<const IMM5: i32, const SAE: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ps_mask&expand=754)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_round_ps_mask&expand=754)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -26710,7 +26710,7 @@ pub unsafe fn _mm512_mask_cmp_round_ps_mask<const IMM5: i32, const SAE: i32>(
a: __m512,
b: __m512,
) -> __mmask16 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x16();
let b = b.as_f32x16();
@@ -26720,7 +26720,7 @@ pub unsafe fn _mm512_mask_cmp_round_ps_mask<const IMM5: i32, const SAE: i32>(
/// Compare packed single-precision (32-bit) floating-point elements in a and b to see if neither is NaN, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpord_ps_mask&expand=1162)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpord_ps_mask&expand=1162)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26730,7 +26730,7 @@ pub unsafe fn _mm512_cmpord_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b to see if neither is NaN, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpord_ps_mask&expand=1163)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpord_ps_mask&expand=1163)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26740,7 +26740,7 @@ pub unsafe fn _mm512_mask_cmpord_ps_mask(k1: __mmask16, a: __m512, b: __m512) ->
/// Compare packed single-precision (32-bit) floating-point elements in a and b to see if either is NaN, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpunord_ps_mask&expand=1170)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpunord_ps_mask&expand=1170)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26750,7 +26750,7 @@ pub unsafe fn _mm512_cmpunord_ps_mask(a: __m512, b: __m512) -> __mmask16 {
/// Compare packed single-precision (32-bit) floating-point elements in a and b to see if either is NaN, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpunord_ps_mask&expand=1171)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpunord_ps_mask&expand=1171)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmpps
@@ -26760,7 +26760,7 @@ pub unsafe fn _mm512_mask_cmpunord_ps_mask(k1: __mmask16, a: __m512, b: __m512)
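cmpord/cmpunord split the lanes into complementary sets: ordered lanes, where neither input is NaN, and unordered lanes, where at least one is. A small sketch of that partition (`nan_partition` is a hypothetical helper, same availability assumptions as above):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn nan_partition(a: __m512, b: __m512) -> (__mmask16, __mmask16) {
    let ord = _mm512_cmpord_ps_mask(a, b); // set where neither lane is NaN
    let unord = _mm512_cmpunord_ps_mask(a, b); // set where either lane is NaN
    debug_assert_eq!(ord ^ unord, u16::MAX); // together they cover all 16 lanes
    (ord, unord)
}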
/// Compare packed double-precision (64-bit) floating-point elements in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_pd_mask&expand=1071)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_pd_mask&expand=1071)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26770,7 +26770,7 @@ pub unsafe fn _mm512_cmplt_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_pd_mask&expand=1072)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_pd_mask&expand=1072)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26780,7 +26780,7 @@ pub unsafe fn _mm512_mask_cmplt_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) ->
/// Compare packed double-precision (64-bit) floating-point elements in a and b for not-less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpnlt_pd_mask&expand=1151)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnlt_pd_mask&expand=1151)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26790,7 +26790,7 @@ pub unsafe fn _mm512_cmpnlt_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b for not-less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpnlt_pd_mask&expand=1152)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnlt_pd_mask&expand=1152)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26800,7 +26800,7 @@ pub unsafe fn _mm512_mask_cmpnlt_pd_mask(m: __mmask8, a: __m512d, b: __m512d) ->
/// Compare packed double-precision (64-bit) floating-point elements in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_pd_mask&expand=1010)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_pd_mask&expand=1010)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26810,7 +26810,7 @@ pub unsafe fn _mm512_cmple_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_pd_mask&expand=1011)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_pd_mask&expand=1011)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26820,7 +26820,7 @@ pub unsafe fn _mm512_mask_cmple_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) ->
/// Compare packed double-precision (64-bit) floating-point elements in a and b for not-less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpnle_pd_mask&expand=1143)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpnle_pd_mask&expand=1143)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26830,7 +26830,7 @@ pub unsafe fn _mm512_cmpnle_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b for not-less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpnle_pd_mask&expand=1144)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpnle_pd_mask&expand=1144)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26840,7 +26840,7 @@ pub unsafe fn _mm512_mask_cmpnle_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -
/// Compare packed double-precision (64-bit) floating-point elements in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_pd_mask&expand=822)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_pd_mask&expand=822)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26850,7 +26850,7 @@ pub unsafe fn _mm512_cmpeq_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_pd_mask&expand=823)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_pd_mask&expand=823)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26860,7 +26860,7 @@ pub unsafe fn _mm512_mask_cmpeq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) ->
/// Compare packed double-precision (64-bit) floating-point elements in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_pd_mask&expand=1127)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_pd_mask&expand=1127)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26870,7 +26870,7 @@ pub unsafe fn _mm512_cmpneq_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_pd_mask&expand=1128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_pd_mask&expand=1128)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -26880,13 +26880,13 @@ pub unsafe fn _mm512_mask_cmpneq_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_pd_mask&expand=741)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_pd_mask&expand=741)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm512_cmp_pd_mask<const IMM8: i32>(a: __m512d, b: __m512d) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let a = a.as_f64x8();
let b = b.as_f64x8();
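The double-precision forms follow the same pattern, but a 512-bit vector holds only eight f64 lanes, so the result narrows to __mmask8. A brief sketch (`le_pd` is a hypothetical helper):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn le_pd(a: __m512d, b: __m512d) -> __mmask8 {
    // Eight f64 lanes produce an 8-bit mask.
    _mm512_cmp_pd_mask::<_CMP_LE_OS>(a, b)
}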
@@ -26896,7 +26896,7 @@ pub unsafe fn _mm512_cmp_pd_mask<const IMM8: i32>(a: __m512d, b: __m512d) -> __m
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_pd_mask&expand=742)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_pd_mask&expand=742)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -26906,7 +26906,7 @@ pub unsafe fn _mm512_mask_cmp_pd_mask<const IMM8: i32>(
a: __m512d,
b: __m512d,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let a = a.as_f64x8();
let b = b.as_f64x8();
let r = vcmppd(a, b, IMM8, k1 as i8, _MM_FROUND_CUR_DIRECTION);
@@ -26915,13 +26915,13 @@ pub unsafe fn _mm512_mask_cmp_pd_mask<const IMM8: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_pd_mask&expand=739)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_pd_mask&expand=739)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm256_cmp_pd_mask<const IMM8: i32>(a: __m256d, b: __m256d) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let a = a.as_f64x4();
let b = b.as_f64x4();
@@ -26931,7 +26931,7 @@ pub unsafe fn _mm256_cmp_pd_mask<const IMM8: i32>(a: __m256d, b: __m256d) -> __m
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_pd_mask&expand=740)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_pd_mask&expand=740)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -26941,7 +26941,7 @@ pub unsafe fn _mm256_mask_cmp_pd_mask<const IMM8: i32>(
a: __m256d,
b: __m256d,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let a = a.as_f64x4();
let b = b.as_f64x4();
let r = vcmppd256(a, b, IMM8, k1 as i8);
@@ -26950,13 +26950,13 @@ pub unsafe fn _mm256_mask_cmp_pd_mask<const IMM8: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_pd_mask&expand=737)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_pd_mask&expand=737)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_pd_mask<const IMM8: i32>(a: __m128d, b: __m128d) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -26966,7 +26966,7 @@ pub unsafe fn _mm_cmp_pd_mask<const IMM8: i32>(a: __m128d, b: __m128d) -> __mmas
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_pd_mask&expand=738)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_pd_mask&expand=738)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -26976,7 +26976,7 @@ pub unsafe fn _mm_mask_cmp_pd_mask<const IMM8: i32>(
a: __m128d,
b: __m128d,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let a = a.as_f64x2();
let b = b.as_f64x2();
let r = vcmppd128(a, b, IMM8, k1 as i8);
@@ -26986,7 +26986,7 @@ pub unsafe fn _mm_mask_cmp_pd_mask<const IMM8: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_pd_mask&expand=751)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_round_pd_mask&expand=751)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -26995,7 +26995,7 @@ pub unsafe fn _mm512_cmp_round_pd_mask<const IMM5: i32, const SAE: i32>(
a: __m512d,
b: __m512d,
) -> __mmask8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let neg_one = -1;
let a = a.as_f64x8();
@@ -27007,7 +27007,7 @@ pub unsafe fn _mm512_cmp_round_pd_mask<const IMM5: i32, const SAE: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_pd_mask&expand=752)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_round_pd_mask&expand=752)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -27017,7 +27017,7 @@ pub unsafe fn _mm512_mask_cmp_round_pd_mask<const IMM5: i32, const SAE: i32>(
a: __m512d,
b: __m512d,
) -> __mmask8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x8();
let b = b.as_f64x8();
@@ -27027,7 +27027,7 @@ pub unsafe fn _mm512_mask_cmp_round_pd_mask<const IMM5: i32, const SAE: i32>(
/// Compare packed double-precision (64-bit) floating-point elements in a and b to see if neither is NaN, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpord_pd_mask&expand=1159)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpord_pd_mask&expand=1159)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -27037,7 +27037,7 @@ pub unsafe fn _mm512_cmpord_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b to see if neither is NaN, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpord_pd_mask&expand=1160)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpord_pd_mask&expand=1160)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -27047,7 +27047,7 @@ pub unsafe fn _mm512_mask_cmpord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d) -
/// Compare packed double-precision (64-bit) floating-point elements in a and b to see if either is NaN, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpunord_pd_mask&expand=1167)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpunord_pd_mask&expand=1167)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -27057,7 +27057,7 @@ pub unsafe fn _mm512_cmpunord_pd_mask(a: __m512d, b: __m512d) -> __mmask8 {
/// Compare packed double-precision (64-bit) floating-point elements in a and b to see if either is NaN, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpunord_pd_mask&expand=1168)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpunord_pd_mask&expand=1168)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp))] //should be vcmppd
@@ -27067,13 +27067,13 @@ pub unsafe fn _mm512_mask_cmpunord_pd_mask(k1: __mmask8, a: __m512d, b: __m512d)
/// Compare the lower single-precision (32-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ss_mask&expand=763)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_ss_mask&expand=763)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_ss_mask<const IMM8: i32>(a: __m128, b: __m128) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let r = vcmpss(a, b, IMM8, neg_one, _MM_FROUND_CUR_DIRECTION);
transmute(r)
@@ -27081,7 +27081,7 @@ pub unsafe fn _mm_cmp_ss_mask<const IMM8: i32>(a: __m128, b: __m128) -> __mmask8
/// Compare the lower single-precision (32-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k using zeromask k1 (the element is zeroed out when mask bit 0 is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ss_mask&expand=764)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_ss_mask&expand=764)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -27091,7 +27091,7 @@ pub unsafe fn _mm_mask_cmp_ss_mask<const IMM8: i32>(
a: __m128,
b: __m128,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let r = vcmpss(a, b, IMM8, k1 as i8, _MM_FROUND_CUR_DIRECTION);
transmute(r)
}
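The `_ss`/`_sd` forms compare only the lowest lane of each vector, so at most bit 0 of the returned mask can be set. A minimal sketch (`scalar_ge` is a hypothetical helper):

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn scalar_ge(a: __m128, b: __m128) -> bool {
    let k = _mm_cmp_ss_mask::<_CMP_GE_OS>(a, b);
    debug_assert!(k <= 1); // only lane 0 participates, so only bit 0 can be set
    k == 1
}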
@@ -27099,7 +27099,7 @@ pub unsafe fn _mm_mask_cmp_ss_mask<const IMM8: i32>(
/// Compare the lower single-precision (32-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_round_ss_mask&expand=757)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_round_ss_mask&expand=757)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -27108,7 +27108,7 @@ pub unsafe fn _mm_cmp_round_ss_mask<const IMM5: i32, const SAE: i32>(
a: __m128,
b: __m128,
) -> __mmask8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let neg_one = -1;
let r = vcmpss(a, b, IMM5, neg_one, SAE);
@@ -27118,7 +27118,7 @@ pub unsafe fn _mm_cmp_round_ss_mask<const IMM5: i32, const SAE: i32>(
/// Compare the lower single-precision (32-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k using zeromask k1 (the element is zeroed out when mask bit 0 is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_round_ss_mask&expand=758)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_round_ss_mask&expand=758)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -27128,7 +27128,7 @@ pub unsafe fn _mm_mask_cmp_round_ss_mask<const IMM5: i32, const SAE: i32>(
a: __m128,
b: __m128,
) -> __mmask8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let r = vcmpss(a, b, IMM5, k1 as i8, SAE);
transmute(r)
@@ -27136,13 +27136,13 @@ pub unsafe fn _mm_mask_cmp_round_ss_mask<const IMM5: i32, const SAE: i32>(
/// Compare the lower double-precision (64-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sd_mask&expand=760)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_sd_mask&expand=760)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vcmp, IMM8 = 0))]
pub unsafe fn _mm_cmp_sd_mask<const IMM8: i32>(a: __m128d, b: __m128d) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let neg_one = -1;
let r = vcmpsd(a, b, IMM8, neg_one, _MM_FROUND_CUR_DIRECTION);
transmute(r)
@@ -27150,7 +27150,7 @@ pub unsafe fn _mm_cmp_sd_mask<const IMM8: i32>(a: __m128d, b: __m128d) -> __mmas
/// Compare the lower double-precision (64-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k using zeromask k1 (the element is zeroed out when mask bit 0 is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_sd_mask&expand=761)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_sd_mask&expand=761)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -27160,7 +27160,7 @@ pub unsafe fn _mm_mask_cmp_sd_mask<const IMM8: i32>(
a: __m128d,
b: __m128d,
) -> __mmask8 {
- static_assert_imm5!(IMM8);
+ static_assert_uimm_bits!(IMM8, 5);
let r = vcmpsd(a, b, IMM8, k1 as i8, _MM_FROUND_CUR_DIRECTION);
transmute(r)
}
@@ -27168,7 +27168,7 @@ pub unsafe fn _mm_mask_cmp_sd_mask<const IMM8: i32>(
/// Compare the lower double-precision (64-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_round_sd_mask&expand=755)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_round_sd_mask&expand=755)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -27177,7 +27177,7 @@ pub unsafe fn _mm_cmp_round_sd_mask<const IMM5: i32, const SAE: i32>(
a: __m128d,
b: __m128d,
) -> __mmask8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let neg_one = -1;
let r = vcmpsd(a, b, IMM5, neg_one, SAE);
@@ -27187,7 +27187,7 @@ pub unsafe fn _mm_cmp_round_sd_mask<const IMM5: i32, const SAE: i32>(
/// Compare the lower double-precision (64-bit) floating-point element in a and b based on the comparison operand specified by imm8, and store the result in mask vector k using zeromask k1 (the element is zeroed out when mask bit 0 is not set).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_round_sd_mask&expand=756)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_round_sd_mask&expand=756)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 0, SAE = 4))]
@@ -27197,7 +27197,7 @@ pub unsafe fn _mm_mask_cmp_round_sd_mask<const IMM5: i32, const SAE: i32>(
a: __m128d,
b: __m128d,
) -> __mmask8 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let r = vcmpsd(a, b, IMM5, k1 as i8, SAE);
transmute(r)
@@ -27205,7 +27205,7 @@ pub unsafe fn _mm_mask_cmp_round_sd_mask<const IMM5: i32, const SAE: i32>(
/// Compare packed unsigned 32-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_epu32_mask&expand=1056)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epu32_mask&expand=1056)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27215,7 +27215,7 @@ pub unsafe fn _mm512_cmplt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed unsigned 32-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epu32_mask&expand=1057)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu32_mask&expand=1057)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27225,7 +27225,7 @@ pub unsafe fn _mm512_mask_cmplt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed unsigned 32-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epu32_mask&expand=1054)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu32_mask&expand=1054)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27235,7 +27235,7 @@ pub unsafe fn _mm256_cmplt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epu32_mask&expand=1055)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu32_mask&expand=1055)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27245,7 +27245,7 @@ pub unsafe fn _mm256_mask_cmplt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 32-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epu32_mask&expand=1052)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu32_mask&expand=1052)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27255,7 +27255,7 @@ pub unsafe fn _mm_cmplt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epu32_mask&expand=1053)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu32_mask&expand=1053)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27265,7 +27265,7 @@ pub unsafe fn _mm_mask_cmplt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 32-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epu32_mask&expand=933)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu32_mask&expand=933)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27275,7 +27275,7 @@ pub unsafe fn _mm512_cmpgt_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed unsigned 32-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epu32_mask&expand=934)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu32_mask&expand=934)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27285,7 +27285,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed unsigned 32-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epu32_mask&expand=931)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu32_mask&expand=931)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27295,7 +27295,7 @@ pub unsafe fn _mm256_cmpgt_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epu32_mask&expand=932)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu32_mask&expand=932)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27305,7 +27305,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 32-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epu32_mask&expand=929)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu32_mask&expand=929)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27315,7 +27315,7 @@ pub unsafe fn _mm_cmpgt_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epu32_mask&expand=930)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu32_mask&expand=930)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27325,7 +27325,7 @@ pub unsafe fn _mm_mask_cmpgt_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epu32_mask&expand=995)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu32_mask&expand=995)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27335,7 +27335,7 @@ pub unsafe fn _mm512_cmple_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed unsigned 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epu32_mask&expand=996)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu32_mask&expand=996)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27345,7 +27345,7 @@ pub unsafe fn _mm512_mask_cmple_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed unsigned 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epu32_mask&expand=993)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu32_mask&expand=993)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27355,7 +27355,7 @@ pub unsafe fn _mm256_cmple_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epu32_mask&expand=994)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu32_mask&expand=994)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27365,7 +27365,7 @@ pub unsafe fn _mm256_mask_cmple_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epu32_mask&expand=991)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu32_mask&expand=991)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27375,7 +27375,7 @@ pub unsafe fn _mm_cmple_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epu32_mask&expand=992)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu32_mask&expand=992)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27385,7 +27385,7 @@ pub unsafe fn _mm_mask_cmple_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epu32_mask&expand=873)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu32_mask&expand=873)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27395,7 +27395,7 @@ pub unsafe fn _mm512_cmpge_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed unsigned 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epu32_mask&expand=874)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu32_mask&expand=874)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27405,7 +27405,7 @@ pub unsafe fn _mm512_mask_cmpge_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed unsigned 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epu32_mask&expand=871)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu32_mask&expand=871)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27415,7 +27415,7 @@ pub unsafe fn _mm256_cmpge_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epu32_mask&expand=872)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu32_mask&expand=872)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27425,7 +27425,7 @@ pub unsafe fn _mm256_mask_cmpge_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epu32_mask&expand=869)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu32_mask&expand=869)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27435,7 +27435,7 @@ pub unsafe fn _mm_cmpge_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epu32_mask&expand=870)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu32_mask&expand=870)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27445,7 +27445,7 @@ pub unsafe fn _mm_mask_cmpge_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 32-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epu32_mask&expand=807)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu32_mask&expand=807)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27455,7 +27455,7 @@ pub unsafe fn _mm512_cmpeq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed unsigned 32-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epu32_mask&expand=808)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu32_mask&expand=808)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27465,7 +27465,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed unsigned 32-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epu32_mask&expand=805)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu32_mask&expand=805)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27475,7 +27475,7 @@ pub unsafe fn _mm256_cmpeq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epu32_mask&expand=806)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu32_mask&expand=806)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27485,7 +27485,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 32-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epu32_mask&expand=803)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu32_mask&expand=803)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27495,7 +27495,7 @@ pub unsafe fn _mm_cmpeq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epu32_mask&expand=804)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu32_mask&expand=804)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27505,7 +27505,7 @@ pub unsafe fn _mm_mask_cmpeq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 32-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epu32_mask&expand=1112)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu32_mask&expand=1112)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27515,7 +27515,7 @@ pub unsafe fn _mm512_cmpneq_epu32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed unsigned 32-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epu32_mask&expand=1113)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu32_mask&expand=1113)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27525,7 +27525,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu32_mask(k1: __mmask16, a: __m512i, b: __m512
/// Compare packed unsigned 32-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epu32_mask&expand=1110)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu32_mask&expand=1110)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27535,7 +27535,7 @@ pub unsafe fn _mm256_cmpneq_epu32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epu32_mask&expand=1111)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu32_mask&expand=1111)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27545,7 +27545,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m256i, b: __m256i
/// Compare packed unsigned 32-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epu32_mask&expand=1108)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu32_mask&expand=1108)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27555,7 +27555,7 @@ pub unsafe fn _mm_cmpneq_epu32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 32-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epu32_mask&expand=1109)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu32_mask&expand=1109)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpud
@@ -27565,7 +27565,7 @@ pub unsafe fn _mm_mask_cmpneq_epu32_mask(k1: __mmask8, a: __m128i, b: __m128i) -
/// Compare packed unsigned 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epu32_mask&expand=721)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu32_mask&expand=721)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
@@ -27574,7 +27574,7 @@ pub unsafe fn _mm512_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i32x16();
let b = b.as_i32x16();
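// A minimal sketch (not the stdarch macro itself) of the bound that
// `static_assert_uimm_bits!(IMM3, 3)` enforces: the const argument must fit in the
// given number of unsigned bits, so IMM3 is restricted to 0..=7, exactly the eight
// _MM_CMPINT_ENUM predicates. Evaluated in a const context, an out-of-range value
// fails to compile rather than reaching the vpcmp instruction.
const fn uimm_bits_check_sketch(val: u32, bits: u32) {
    assert!(val < (1 << bits), "immediate does not fit in the requested bit width");
}
const _: () = uimm_bits_check_sketch(5, 3); // ok: 5 < 8; a value of 8 would be rejected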
@@ -27584,7 +27584,7 @@ pub unsafe fn _mm512_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epu32_mask&expand=722)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu32_mask&expand=722)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -27594,7 +27594,7 @@ pub unsafe fn _mm512_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i32x16();
let b = b.as_i32x16();
let r = vpcmpud(a, b, IMM3, k1 as i16);
@@ -27603,7 +27603,7 @@ pub unsafe fn _mm512_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epu32_mask&expand=719)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu32_mask&expand=719)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
@@ -27612,7 +27612,7 @@ pub unsafe fn _mm256_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i32x8();
let b = b.as_i32x8();
@@ -27622,7 +27622,7 @@ pub unsafe fn _mm256_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epu32_mask&expand=720)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu32_mask&expand=720)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -27632,7 +27632,7 @@ pub unsafe fn _mm256_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i32x8();
let b = b.as_i32x8();
let r = vpcmpud256(a, b, IMM3, k1 as i8);
@@ -27641,13 +27641,13 @@ pub unsafe fn _mm256_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epu32_mask&expand=717)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu32_mask&expand=717)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
pub unsafe fn _mm_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i32x4();
let b = b.as_i32x4();
@@ -27657,7 +27657,7 @@ pub unsafe fn _mm_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m
/// Compare packed unsigned 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epu32_mask&expand=718)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu32_mask&expand=718)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -27667,7 +27667,7 @@ pub unsafe fn _mm_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m128i,
b: __m128i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i32x4();
let b = b.as_i32x4();
let r = vpcmpud128(a, b, IMM3, k1 as i8);
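// A hedged usage sketch for the predicate form above, assuming the _MM_CMPINT_LT
// constant and the other core::arch::x86_64 items are in scope: each 32-bit lane
// where a < b (unsigned) sets its bit in the returned __mmask8.
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn lanes_lt_epu32(a: __m128i, b: __m128i) -> __mmask8 {
    _mm_cmp_epu32_mask::<_MM_CMPINT_LT>(a, b)
}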
@@ -27676,7 +27676,7 @@ pub unsafe fn _mm_mask_cmp_epu32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 32-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_epi32_mask&expand=1029)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi32_mask&expand=1029)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27686,7 +27686,7 @@ pub unsafe fn _mm512_cmplt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed signed 32-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epi32_mask&expand=1031)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi32_mask&expand=1031)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27696,7 +27696,7 @@ pub unsafe fn _mm512_mask_cmplt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed signed 32-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epi32_mask&expand=1027)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi32_mask&expand=1027)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27706,7 +27706,7 @@ pub unsafe fn _mm256_cmplt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epi32_mask&expand=1028)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi32_mask&expand=1028)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27716,7 +27716,7 @@ pub unsafe fn _mm256_mask_cmplt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 32-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi32_mask&expand=1025)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi32_mask&expand=1025)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27726,7 +27726,7 @@ pub unsafe fn _mm_cmplt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epi32_mask&expand=1026)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi32_mask&expand=1026)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27736,7 +27736,7 @@ pub unsafe fn _mm_mask_cmplt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 32-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epi32_mask&expand=905)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi32_mask&expand=905)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27746,7 +27746,7 @@ pub unsafe fn _mm512_cmpgt_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed signed 32-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epi32_mask&expand=906)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi32_mask&expand=906)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27756,7 +27756,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed signed 32-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi32_mask&expand=903)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi32_mask&expand=903)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27766,7 +27766,7 @@ pub unsafe fn _mm256_cmpgt_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epi32_mask&expand=904)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi32_mask&expand=904)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27776,7 +27776,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 32-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi32_mask&expand=901)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi32_mask&expand=901)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27786,7 +27786,7 @@ pub unsafe fn _mm_cmpgt_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epi32_mask&expand=902)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi32_mask&expand=902)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
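// A minimal sketch of how a returned mask is read, assuming an AVX512F+VL target:
// bit i of the __mmask8 corresponds to 32-bit lane i.
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn mask_layout_sketch() {
    let a = _mm_set_epi32(3, 2, 1, 0); // lane 0 holds 0, lane 3 holds 3
    let b = _mm_set1_epi32(1);
    let k = _mm_cmpgt_epi32_mask(a, b);
    // Only lanes 2 and 3 (values 2 and 3) are greater than 1.
    assert_eq!(k, 0b0000_1100);
}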
@@ -27796,7 +27796,7 @@ pub unsafe fn _mm_mask_cmpgt_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epi32_mask&expand=971)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi32_mask&expand=971)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27806,7 +27806,7 @@ pub unsafe fn _mm512_cmple_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed signed 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epi32_mask&expand=972)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi32_mask&expand=972)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27816,7 +27816,7 @@ pub unsafe fn _mm512_mask_cmple_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed signed 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epi32_mask&expand=969)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi32_mask&expand=969)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27826,7 +27826,7 @@ pub unsafe fn _mm256_cmple_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epi32_mask&expand=970)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi32_mask&expand=970)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27836,7 +27836,7 @@ pub unsafe fn _mm256_mask_cmple_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epi32_mask&expand=967)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi32_mask&expand=967)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27846,7 +27846,7 @@ pub unsafe fn _mm_cmple_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epi32_mask&expand=968)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi32_mask&expand=968)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27856,7 +27856,7 @@ pub unsafe fn _mm_mask_cmple_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epi32_mask&expand=849)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi32_mask&expand=849)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27866,7 +27866,7 @@ pub unsafe fn _mm512_cmpge_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed signed 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epi32_mask&expand=850)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi32_mask&expand=850)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27876,7 +27876,7 @@ pub unsafe fn _mm512_mask_cmpge_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed signed 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epi32_mask&expand=847)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi32_mask&expand=847)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27886,7 +27886,7 @@ pub unsafe fn _mm256_cmpge_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epi32_mask&expand=848)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi32_mask&expand=848)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27896,7 +27896,7 @@ pub unsafe fn _mm256_mask_cmpge_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epi32_mask&expand=845)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi32_mask&expand=845)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27906,7 +27906,7 @@ pub unsafe fn _mm_cmpge_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 32-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epi32_mask&expand=846)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi32_mask&expand=846)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27916,7 +27916,7 @@ pub unsafe fn _mm_mask_cmpge_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed 32-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epi32_mask&expand=779)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi32_mask&expand=779)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27926,7 +27926,7 @@ pub unsafe fn _mm512_cmpeq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed 32-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epi32_mask&expand=780)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi32_mask&expand=780)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27936,7 +27936,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512i
/// Compare packed 32-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi32_mask&expand=777)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi32_mask&expand=777)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27946,7 +27946,7 @@ pub unsafe fn _mm256_cmpeq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed 32-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epi32_mask&expand=778)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi32_mask&expand=778)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27956,7 +27956,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed 32-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi32_mask&expand=775)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi32_mask&expand=775)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27966,7 +27966,7 @@ pub unsafe fn _mm_cmpeq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed 32-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epi32_mask&expand=776)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi32_mask&expand=776)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27976,7 +27976,7 @@ pub unsafe fn _mm_mask_cmpeq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed 32-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epi32_mask&expand=1088)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi32_mask&expand=1088)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27986,7 +27986,7 @@ pub unsafe fn _mm512_cmpneq_epi32_mask(a: __m512i, b: __m512i) -> __mmask16 {
/// Compare packed 32-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epi32_mask&expand=1089)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi32_mask&expand=1089)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -27996,7 +27996,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi32_mask(k1: __mmask16, a: __m512i, b: __m512
/// Compare packed 32-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epi32_mask&expand=1086)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi32_mask&expand=1086)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -28006,7 +28006,7 @@ pub unsafe fn _mm256_cmpneq_epi32_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed 32-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epi32_mask&expand=1087)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi32_mask&expand=1087)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -28016,7 +28016,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m256i, b: __m256i
/// Compare packed 32-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epi32_mask&expand=1084)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi32_mask&expand=1084)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -28026,7 +28026,7 @@ pub unsafe fn _mm_cmpneq_epi32_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed 32-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epi32_mask&expand=1085)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi32_mask&expand=1085)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpd
@@ -28036,7 +28036,7 @@ pub unsafe fn _mm_mask_cmpneq_epi32_mask(k1: __mmask8, a: __m128i, b: __m128i) -
/// Compare packed signed 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epi32_mask&expand=697)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi32_mask&expand=697)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
@@ -28045,7 +28045,7 @@ pub unsafe fn _mm512_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i32x16();
let b = b.as_i32x16();
@@ -28055,7 +28055,7 @@ pub unsafe fn _mm512_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epi32_mask&expand=698)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi32_mask&expand=698)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -28065,7 +28065,7 @@ pub unsafe fn _mm512_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask16 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i32x16();
let b = b.as_i32x16();
let r = vpcmpd(a, b, IMM3, k1 as i16);
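// A hedged sketch of the zeromask semantics described above: the result is, in
// effect, the comparison ANDed with k1, so a lane whose k1 bit is clear can never
// set a bit in the output. Assuming the _MM_CMPINT_EQ constant from this module:
#[target_feature(enable = "avx512f")]
unsafe fn masked_eq_sketch(k1: __mmask16, a: __m512i, b: __m512i) -> __mmask16 {
    // Matches what _mm512_mask_cmp_epi32_mask::<_MM_CMPINT_EQ>(k1, a, b) computes.
    k1 & _mm512_cmp_epi32_mask::<_MM_CMPINT_EQ>(a, b)
}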
@@ -28074,7 +28074,7 @@ pub unsafe fn _mm512_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=#text=_mm256_cmp_epi32_mask&expand=695)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi32_mask&expand=695)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
@@ -28083,7 +28083,7 @@ pub unsafe fn _mm256_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i32x8();
let b = b.as_i32x8();
@@ -28093,7 +28093,7 @@ pub unsafe fn _mm256_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epi32_mask&expand=696)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi32_mask&expand=696)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -28103,7 +28103,7 @@ pub unsafe fn _mm256_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i32x8();
let b = b.as_i32x8();
let r = vpcmpd256(a, b, IMM3, k1 as i8);
@@ -28112,13 +28112,13 @@ pub unsafe fn _mm256_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epi32_mask&expand=693)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi32_mask&expand=693)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
pub unsafe fn _mm_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i32x4();
let b = b.as_i32x4();
@@ -28128,7 +28128,7 @@ pub unsafe fn _mm_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m
/// Compare packed signed 32-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epi32_mask&expand=694)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi32_mask&expand=694)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -28138,7 +28138,7 @@ pub unsafe fn _mm_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m128i,
b: __m128i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i32x4();
let b = b.as_i32x4();
let r = vpcmpd128(a, b, IMM3, k1 as i8);
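// The named comparisons are, in effect, the predicate form specialized to one
// _MM_CMPINT_ENUM value; a sketch of that correspondence (an assumption for
// illustration, not asserted by this diff) for not-equal on 128-bit vectors:
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn neq_two_ways(a: __m128i, b: __m128i) -> (__mmask8, __mmask8) {
    // Both calls should produce the same mask.
    (
        _mm_cmpneq_epi32_mask(a, b),
        _mm_cmp_epi32_mask::<_MM_CMPINT_NE>(a, b),
    )
}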
@@ -28147,7 +28147,7 @@ pub unsafe fn _mm_mask_cmp_epi32_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 64-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_epu64_mask&expand=1062)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epu64_mask&expand=1062)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28157,7 +28157,7 @@ pub unsafe fn _mm512_cmplt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epu64_mask&expand=1063)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epu64_mask&expand=1063)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28167,7 +28167,7 @@ pub unsafe fn _mm512_mask_cmplt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed unsigned 64-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epu64_mask&expand=1060)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epu64_mask&expand=1060)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28177,7 +28177,7 @@ pub unsafe fn _mm256_cmplt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epu64_mask&expand=1061)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epu64_mask&expand=1061)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28187,7 +28187,7 @@ pub unsafe fn _mm256_mask_cmplt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 64-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epu64_mask&expand=1058)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epu64_mask&expand=1058)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28197,7 +28197,7 @@ pub unsafe fn _mm_cmplt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epu64_mask&expand=1059)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epu64_mask&expand=1059)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28207,7 +28207,7 @@ pub unsafe fn _mm_mask_cmplt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 64-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epu64_mask&expand=939)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epu64_mask&expand=939)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28217,7 +28217,7 @@ pub unsafe fn _mm512_cmpgt_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epu64_mask&expand=940)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epu64_mask&expand=940)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28227,7 +28227,7 @@ pub unsafe fn _mm512_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed unsigned 64-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epu64_mask&expand=937)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epu64_mask&expand=937)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28237,7 +28237,7 @@ pub unsafe fn _mm256_cmpgt_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epu64_mask&expand=938)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epu64_mask&expand=938)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28247,7 +28247,7 @@ pub unsafe fn _mm256_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 64-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epu64_mask&expand=935)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epu64_mask&expand=935)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28257,7 +28257,7 @@ pub unsafe fn _mm_cmpgt_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epu64_mask&expand=936)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epu64_mask&expand=936)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
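// A small sketch of why the epu64 (unsigned) comparisons exist alongside the signed
// epi64 ones: the same bit pattern orders differently under the two interpretations.
// Assuming an AVX512F+VL target:
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn signedness_sketch() {
    let a = _mm_set1_epi64x(-1); // bit pattern u64::MAX in both 64-bit lanes
    let b = _mm_set1_epi64x(0);
    // Unsigned: u64::MAX > 0 in both lanes, so both mask bits are set.
    assert_eq!(_mm_cmpgt_epu64_mask(a, b), 0b11);
}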
@@ -28267,7 +28267,7 @@ pub unsafe fn _mm_mask_cmpgt_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epu64_mask&expand=1001)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epu64_mask&expand=1001)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28277,7 +28277,7 @@ pub unsafe fn _mm512_cmple_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epu64_mask&expand=1002)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epu64_mask&expand=1002)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28287,7 +28287,7 @@ pub unsafe fn _mm512_mask_cmple_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epu64_mask&expand=999)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epu64_mask&expand=999)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28297,7 +28297,7 @@ pub unsafe fn _mm256_cmple_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epu64_mask&expand=1000)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epu64_mask&expand=1000)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28307,7 +28307,7 @@ pub unsafe fn _mm256_mask_cmple_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epu64_mask&expand=997)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epu64_mask&expand=997)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28317,7 +28317,7 @@ pub unsafe fn _mm_cmple_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epu64_mask&expand=998)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epu64_mask&expand=998)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28327,7 +28327,7 @@ pub unsafe fn _mm_mask_cmple_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epu64_mask&expand=879)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epu64_mask&expand=879)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28337,7 +28337,7 @@ pub unsafe fn _mm512_cmpge_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epu64_mask&expand=880)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epu64_mask&expand=880)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28347,7 +28347,7 @@ pub unsafe fn _mm512_mask_cmpge_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epu64_mask&expand=877)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epu64_mask&expand=877)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28357,7 +28357,7 @@ pub unsafe fn _mm256_cmpge_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epu64_mask&expand=878)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epu64_mask&expand=878)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28367,7 +28367,7 @@ pub unsafe fn _mm256_mask_cmpge_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epu64_mask&expand=875)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epu64_mask&expand=875)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28377,7 +28377,7 @@ pub unsafe fn _mm_cmpge_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epu64_mask&expand=876)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epu64_mask&expand=876)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28387,7 +28387,7 @@ pub unsafe fn _mm_mask_cmpge_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epu64_mask&expand=813)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epu64_mask&expand=813)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28397,7 +28397,7 @@ pub unsafe fn _mm512_cmpeq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epu64_mask&expand=814)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epu64_mask&expand=814)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28407,7 +28407,7 @@ pub unsafe fn _mm512_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epu64_mask&expand=811)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epu64_mask&expand=811)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28417,7 +28417,7 @@ pub unsafe fn _mm256_cmpeq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epu64_mask&expand=812)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epu64_mask&expand=812)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28427,7 +28427,7 @@ pub unsafe fn _mm256_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epu64_mask&expand=809)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epu64_mask&expand=809)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28437,7 +28437,7 @@ pub unsafe fn _mm_cmpeq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epu64_mask&expand=810)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epu64_mask&expand=810)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28447,7 +28447,7 @@ pub unsafe fn _mm_mask_cmpeq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed unsigned 64-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epu64_mask&expand=1118)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epu64_mask&expand=1118)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28457,7 +28457,7 @@ pub unsafe fn _mm512_cmpneq_epu64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epu64_mask&expand=1119)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epu64_mask&expand=1119)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28467,7 +28467,7 @@ pub unsafe fn _mm512_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m512i, b: __m512i
/// Compare packed unsigned 64-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epu64_mask&expand=1116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epu64_mask&expand=1116)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28477,7 +28477,7 @@ pub unsafe fn _mm256_cmpneq_epu64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epu64_mask&expand=1117)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epu64_mask&expand=1117)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28487,7 +28487,7 @@ pub unsafe fn _mm256_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m256i, b: __m256i
/// Compare packed unsigned 64-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epu64_mask&expand=1114)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epu64_mask&expand=1114)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28497,7 +28497,7 @@ pub unsafe fn _mm_cmpneq_epu64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed unsigned 64-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epu64_mask&expand=1115)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epu64_mask&expand=1115)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpuq
@@ -28507,7 +28507,7 @@ pub unsafe fn _mm_mask_cmpneq_epu64_mask(k1: __mmask8, a: __m128i, b: __m128i) -
/// Compare packed unsigned 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epu64_mask&expand=727)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epu64_mask&expand=727)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
@@ -28516,7 +28516,7 @@ pub unsafe fn _mm512_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i64x8();
let b = b.as_i64x8();
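
The `static_assert_uimm_bits!(IMM3, 3)` form makes the contract explicit: the const generic must be an unsigned value representable in 3 bits, i.e. one of the eight `_MM_CMPINT_*` predicates. A sketch under the same toolchain assumptions as above:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_epi64(1);
    let b = _mm512_set1_epi64(2);
    // IMM3 must fit in 3 unsigned bits (0..=7); anything else fails at compile time.
    // _MM_CMPINT_LT selects unsigned less-than, which all eight lanes satisfy here.
    assert_eq!(_mm512_cmp_epu64_mask::<_MM_CMPINT_LT>(a, b), 0xFF);
    // The predicate form subsumes the named compares.
    assert_eq!(
        _mm512_cmp_epu64_mask::<_MM_CMPINT_EQ>(a, a),
        _mm512_cmpeq_epu64_mask(a, a)
    );
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```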
@@ -28526,7 +28526,7 @@ pub unsafe fn _mm512_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epu64_mask&expand=728)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epu64_mask&expand=728)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -28536,7 +28536,7 @@ pub unsafe fn _mm512_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i64x8();
let b = b.as_i64x8();
let r = vpcmpuq(a, b, IMM3, k1 as i8);
@@ -28545,7 +28545,7 @@ pub unsafe fn _mm512_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epu64_mask&expand=725)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epu64_mask&expand=725)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
@@ -28554,7 +28554,7 @@ pub unsafe fn _mm256_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i64x4();
let b = b.as_i64x4();
@@ -28564,7 +28564,7 @@ pub unsafe fn _mm256_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epu64_mask&expand=726)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epu64_mask&expand=726)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -28574,7 +28574,7 @@ pub unsafe fn _mm256_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i64x4();
let b = b.as_i64x4();
let r = vpcmpuq256(a, b, IMM3, k1 as i8);
@@ -28583,13 +28583,13 @@ pub unsafe fn _mm256_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed unsigned 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epu64_mask&expand=723)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epu64_mask&expand=723)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
pub unsafe fn _mm_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i64x2();
let b = b.as_i64x2();
@@ -28599,7 +28599,7 @@ pub unsafe fn _mm_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m
/// Compare packed unsigned 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epu64_mask&expand=724)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epu64_mask&expand=724)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -28609,7 +28609,7 @@ pub unsafe fn _mm_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m128i,
b: __m128i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i64x2();
let b = b.as_i64x2();
let r = vpcmpuq128(a, b, IMM3, k1 as i8);
@@ -28618,7 +28618,7 @@ pub unsafe fn _mm_mask_cmp_epu64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 64-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmplt_epi64_mask&expand=1037)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmplt_epi64_mask&expand=1037)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28628,7 +28628,7 @@ pub unsafe fn _mm512_cmplt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmplt_epi64_mask&expand=1038)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmplt_epi64_mask&expand=1038)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28638,7 +28638,7 @@ pub unsafe fn _mm512_mask_cmplt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed signed 64-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmplt_epi64_mask&expand=1035)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmplt_epi64_mask&expand=1035)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28648,7 +28648,7 @@ pub unsafe fn _mm256_cmplt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmplt_epi64_mask&expand=1036)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmplt_epi64_mask&expand=1036)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28658,7 +28658,7 @@ pub unsafe fn _mm256_mask_cmplt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 64-bit integers in a and b for less-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi64_mask&expand=1033)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi64_mask&expand=1033)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28668,7 +28668,7 @@ pub unsafe fn _mm_cmplt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for less-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmplt_epi64_mask&expand=1034)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmplt_epi64_mask&expand=1034)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
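
The `epi64` compares treat lanes as signed, which matters once a lane holds a negative value. A minimal sketch, same assumptions as the earlier ones:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn demo() {
    let a = _mm_set_epi64x(-1, 5); // element 1 = -1, element 0 = 5
    let b = _mm_set_epi64x(0, 5);
    // Signed: -1 < 0 holds in lane 1; 5 < 5 fails in lane 0.
    assert_eq!(_mm_cmplt_epi64_mask(a, b), 0b10);
    // The zeromask variant suppresses lane 1 entirely.
    assert_eq!(_mm_mask_cmplt_epi64_mask(0b01, a, b), 0b00);
}

fn main() {
    if is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl") {
        unsafe { demo() }
    }
}
```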
@@ -28678,7 +28678,7 @@ pub unsafe fn _mm_mask_cmplt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 64-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpgt_epi64_mask&expand=913)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpgt_epi64_mask&expand=913)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28688,7 +28688,7 @@ pub unsafe fn _mm512_cmpgt_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpgt_epi64_mask&expand=914)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpgt_epi64_mask&expand=914)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28698,7 +28698,7 @@ pub unsafe fn _mm512_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed signed 64-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpgt_epi64_mask&expand=911)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpgt_epi64_mask&expand=911)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28708,7 +28708,7 @@ pub unsafe fn _mm256_cmpgt_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpgt_epi64_mask&expand=912)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpgt_epi64_mask&expand=912)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28718,7 +28718,7 @@ pub unsafe fn _mm256_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 64-bit integers in a and b for greater-than, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi64_mask&expand=909)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi64_mask&expand=909)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28728,7 +28728,7 @@ pub unsafe fn _mm_cmpgt_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for greater-than, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpgt_epi64_mask&expand=910)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpgt_epi64_mask&expand=910)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
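
A compare mask is usually consumed by a masked operation; one illustrative pattern (a sketch, not taken from this file) drives a per-lane select with the greater-than mask:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set_epi64(7, -6, 5, -4, 3, -2, 1, 0); // e7..e0
    let b = _mm512_setzero_si512();
    // One mask bit per lane where a > b (signed): lanes 1, 3, 5, 7.
    let gt = _mm512_cmpgt_epi64_mask(a, b);
    assert_eq!(gt, 0b1010_1010);
    // Blend takes from the third operand where the mask bit is set: max(a, 0).
    let relu = _mm512_mask_blend_epi64(gt, b, a);
    assert_eq!(_mm512_reduce_add_epi64(relu), 7 + 5 + 3 + 1);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```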
@@ -28738,7 +28738,7 @@ pub unsafe fn _mm_mask_cmpgt_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmple_epi64_mask&expand=977)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmple_epi64_mask&expand=977)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28748,7 +28748,7 @@ pub unsafe fn _mm512_cmple_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmple_epi64_mask&expand=978)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmple_epi64_mask&expand=978)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28758,7 +28758,7 @@ pub unsafe fn _mm512_mask_cmple_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmple_epi64_mask&expand=975)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmple_epi64_mask&expand=975)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28768,7 +28768,7 @@ pub unsafe fn _mm256_cmple_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmple_epi64_mask&expand=976)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmple_epi64_mask&expand=976)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28778,7 +28778,7 @@ pub unsafe fn _mm256_mask_cmple_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_epi64_mask&expand=973)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_epi64_mask&expand=973)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28788,7 +28788,7 @@ pub unsafe fn _mm_cmple_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for less-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmple_epi64_mask&expand=974)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmple_epi64_mask&expand=974)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28798,7 +28798,7 @@ pub unsafe fn _mm_mask_cmple_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpge_epi64_mask&expand=855)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpge_epi64_mask&expand=855)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28808,7 +28808,7 @@ pub unsafe fn _mm512_cmpge_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpge_epi64_mask&expand=856)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpge_epi64_mask&expand=856)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28818,7 +28818,7 @@ pub unsafe fn _mm512_mask_cmpge_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpge_epi64_mask&expand=853)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpge_epi64_mask&expand=853)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28828,7 +28828,7 @@ pub unsafe fn _mm256_cmpge_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpge_epi64_mask&expand=854)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpge_epi64_mask&expand=854)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28838,7 +28838,7 @@ pub unsafe fn _mm256_mask_cmpge_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_epi64_mask&expand=851)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_epi64_mask&expand=851)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28848,7 +28848,7 @@ pub unsafe fn _mm_cmpge_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for greater-than-or-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpge_epi64_mask&expand=852)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpge_epi64_mask&expand=852)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
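
The ordered compares are related in the expected ways: `cmpge(a, b)` is `cmple(b, a)` lane for lane, and the complement of `cmplt(a, b)` over the eight mask bits. A quick check under the same assumptions:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1);
    let b = _mm512_set1_epi64(4);
    // a >= b is exactly b <= a.
    assert_eq!(_mm512_cmpge_epi64_mask(a, b), _mm512_cmple_epi64_mask(b, a));
    // ...and the bitwise complement of a < b (__mmask8 is a plain u8).
    assert_eq!(_mm512_cmpge_epi64_mask(a, b), !_mm512_cmplt_epi64_mask(a, b));
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```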
@@ -28858,7 +28858,7 @@ pub unsafe fn _mm_mask_cmpge_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed 64-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpeq_epi64_mask&expand=787)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpeq_epi64_mask&expand=787)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28868,7 +28868,7 @@ pub unsafe fn _mm512_cmpeq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed 64-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpeq_epi64_mask&expand=788)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpeq_epi64_mask&expand=788)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28878,7 +28878,7 @@ pub unsafe fn _mm512_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i)
/// Compare packed 64-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpeq_epi64_mask&expand=785)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpeq_epi64_mask&expand=785)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28888,7 +28888,7 @@ pub unsafe fn _mm256_cmpeq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed 64-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpeq_epi64_mask&expand=786)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpeq_epi64_mask&expand=786)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28898,7 +28898,7 @@ pub unsafe fn _mm256_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i)
/// Compare packed 64-bit integers in a and b for equality, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi64_mask&expand=783)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi64_mask&expand=783)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28908,7 +28908,7 @@ pub unsafe fn _mm_cmpeq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed 64-bit integers in a and b for equality, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpeq_epi64_mask&expand=784)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpeq_epi64_mask&expand=784)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28918,7 +28918,7 @@ pub unsafe fn _mm_mask_cmpeq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) ->
/// Compare packed signed 64-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmpneq_epi64_mask&expand=1094)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmpneq_epi64_mask&expand=1094)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28928,7 +28928,7 @@ pub unsafe fn _mm512_cmpneq_epi64_mask(a: __m512i, b: __m512i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmpneq_epi64_mask&expand=1095)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmpneq_epi64_mask&expand=1095)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28938,7 +28938,7 @@ pub unsafe fn _mm512_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m512i, b: __m512i
/// Compare packed signed 64-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmpneq_epi64_mask&expand=1092)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmpneq_epi64_mask&expand=1092)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28948,7 +28948,7 @@ pub unsafe fn _mm256_cmpneq_epi64_mask(a: __m256i, b: __m256i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmpneq_epi64_mask&expand=1093)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmpneq_epi64_mask&expand=1093)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28958,7 +28958,7 @@ pub unsafe fn _mm256_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m256i, b: __m256i
/// Compare packed signed 64-bit integers in a and b for not-equal, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_epi64_mask&expand=1090)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_epi64_mask&expand=1090)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28968,7 +28968,7 @@ pub unsafe fn _mm_cmpneq_epi64_mask(a: __m128i, b: __m128i) -> __mmask8 {
/// Compare packed signed 64-bit integers in a and b for not-equal, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmpneq_epi64_mask&expand=1091)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmpneq_epi64_mask&expand=1091)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpcmp))] //should be vpcmpq
@@ -28978,7 +28978,7 @@ pub unsafe fn _mm_mask_cmpneq_epi64_mask(k1: __mmask8, a: __m128i, b: __m128i) -
/// Compare packed signed 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_epi64_mask&expand=703)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_cmp_epi64_mask&expand=703)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(2)]
@@ -28987,7 +28987,7 @@ pub unsafe fn _mm512_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i64x8();
let b = b.as_i64x8();
@@ -28997,7 +28997,7 @@ pub unsafe fn _mm512_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_epi64_mask&expand=704)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cmp_epi64_mask&expand=704)
#[inline]
#[target_feature(enable = "avx512f")]
#[rustc_legacy_const_generics(3)]
@@ -29007,7 +29007,7 @@ pub unsafe fn _mm512_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m512i,
b: __m512i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i64x8();
let b = b.as_i64x8();
let r = vpcmpq(a, b, IMM3, k1 as i8);
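
The named compares are shorthands for the predicate form; `_MM_CMPINT_NE` through the generic entry point matches `cmpneq`, including the zeromask behaviour. A sketch:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
    let b = _mm512_set1_epi64(3);
    let k1: __mmask8 = 0b0101_0101;
    assert_eq!(
        _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_NE>(k1, a, b),
        _mm512_mask_cmpneq_epi64_mask(k1, a, b)
    );
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```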
@@ -29016,7 +29016,7 @@ pub unsafe fn _mm512_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_epi64_mask&expand=701)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_cmp_epi64_mask&expand=701)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
@@ -29025,7 +29025,7 @@ pub unsafe fn _mm256_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i64x4();
let b = b.as_i64x4();
@@ -29035,7 +29035,7 @@ pub unsafe fn _mm256_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_epi64_mask&expand=702)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cmp_epi64_mask&expand=702)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -29045,7 +29045,7 @@ pub unsafe fn _mm256_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m256i,
b: __m256i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i64x4();
let b = b.as_i64x4();
let r = vpcmpq256(a, b, IMM3, k1 as i8);
@@ -29054,13 +29054,13 @@ pub unsafe fn _mm256_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Compare packed signed 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_epi64_mask&expand=699)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmp_epi64_mask&expand=699)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(test, assert_instr(vpcmp, IMM3 = 0))]
pub unsafe fn _mm_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m128i) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let neg_one = -1;
let a = a.as_i64x2();
let b = b.as_i64x2();
@@ -29070,7 +29070,7 @@ pub unsafe fn _mm_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(a: __m128i, b: __m
/// Compare packed signed 64-bit integers in a and b based on the comparison operand specified by imm8, and store the results in mask vector k using zeromask k1 (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_epi64_mask&expand=700)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cmp_epi64_mask&expand=700)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[rustc_legacy_const_generics(3)]
@@ -29080,7 +29080,7 @@ pub unsafe fn _mm_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
a: __m128i,
b: __m128i,
) -> __mmask8 {
- static_assert_imm3!(IMM3);
+ static_assert_uimm_bits!(IMM3, 3);
let a = a.as_i64x2();
let b = b.as_i64x2();
let r = vpcmpq128(a, b, IMM3, k1 as i8);
@@ -29089,7 +29089,7 @@ pub unsafe fn _mm_mask_cmp_epi64_mask<const IMM3: _MM_CMPINT_ENUM>(
/// Reduce the packed 32-bit integers in a by addition. Returns the sum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_epi32&expand=4556)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_epi32&expand=4556)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_add_epi32(a: __m512i) -> i32 {
@@ -29098,7 +29098,7 @@ pub unsafe fn _mm512_reduce_add_epi32(a: __m512i) -> i32 {
/// Reduce the packed 32-bit integers in a by addition using mask k. Returns the sum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_add_epi32&expand=4555)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_epi32&expand=4555)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 {
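
In the masked reductions, only active lanes contribute; for addition the inactive lanes behave as the additive identity 0. A minimal sketch under the same toolchain assumptions:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_epi32(1);
    let k: __mmask16 = 0b0000_1111_0000_0011;
    // Each active lane adds 1, so the sum equals the number of set mask bits.
    assert_eq!(_mm512_mask_reduce_add_epi32(k, a), k.count_ones() as i32);
    // Unmasked: all sixteen lanes.
    assert_eq!(_mm512_reduce_add_epi32(a), 16);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```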
@@ -29111,7 +29111,7 @@ pub unsafe fn _mm512_mask_reduce_add_epi32(k: __mmask16, a: __m512i) -> i32 {
/// Reduce the packed 64-bit integers in a by addition. Returns the sum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_epi64&expand=4558)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_epi64&expand=4558)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_add_epi64(a: __m512i) -> i64 {
@@ -29120,7 +29120,7 @@ pub unsafe fn _mm512_reduce_add_epi64(a: __m512i) -> i64 {
/// Reduce the packed 64-bit integers in a by addition using mask k. Returns the sum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_add_epi64&expand=4557)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_epi64&expand=4557)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 {
@@ -29133,7 +29133,7 @@ pub unsafe fn _mm512_mask_reduce_add_epi64(k: __mmask8, a: __m512i) -> i64 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by addition. Returns the sum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_ps&expand=4562)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_ps&expand=4562)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_add_ps(a: __m512) -> f32 {
@@ -29142,7 +29142,7 @@ pub unsafe fn _mm512_reduce_add_ps(a: __m512) -> f32 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by addition using mask k. Returns the sum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_add_ps&expand=4561)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_ps&expand=4561)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 {
@@ -29155,7 +29155,7 @@ pub unsafe fn _mm512_mask_reduce_add_ps(k: __mmask16, a: __m512) -> f32 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by addition. Returns the sum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_pd&expand=4560)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_add_pd&expand=4560)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_add_pd(a: __m512d) -> f64 {
@@ -29164,7 +29164,7 @@ pub unsafe fn _mm512_reduce_add_pd(a: __m512d) -> f64 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by addition using mask k. Returns the sum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_add_pd&expand=4559)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_add_pd&expand=4559)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 {
@@ -29177,7 +29177,7 @@ pub unsafe fn _mm512_mask_reduce_add_pd(k: __mmask8, a: __m512d) -> f64 {
/// Reduce the packed 32-bit integers in a by multiplication. Returns the product of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_epi32&expand=4600)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_epi32&expand=4600)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 {
@@ -29186,7 +29186,7 @@ pub unsafe fn _mm512_reduce_mul_epi32(a: __m512i) -> i32 {
/// Reduce the packed 32-bit integers in a by multiplication using mask k. Returns the product of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_mul_epi32&expand=4599)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_epi32&expand=4599)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> i32 {
@@ -29199,7 +29199,7 @@ pub unsafe fn _mm512_mask_reduce_mul_epi32(k: __mmask16, a: __m512i) -> i32 {
/// Reduce the packed 64-bit integers in a by multiplication. Returns the product of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_epi64&expand=4602)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_epi64&expand=4602)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 {
@@ -29208,7 +29208,7 @@ pub unsafe fn _mm512_reduce_mul_epi64(a: __m512i) -> i64 {
/// Reduce the packed 64-bit integers in a by multiplication using mask k. Returns the product of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_mul_epi64&expand=4601)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_epi64&expand=4601)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 {
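
For the multiplicative reductions, inactive lanes behave as the identity 1 (per the reference pseudocode), so masking out lanes never zeroes the product. A sketch:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_epi64(2);
    let k: __mmask8 = 0b0000_0111; // three active lanes
    // Inactive lanes contribute 1, so the product is 2^3.
    assert_eq!(_mm512_mask_reduce_mul_epi64(k, a), 8);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```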
@@ -29221,7 +29221,7 @@ pub unsafe fn _mm512_mask_reduce_mul_epi64(k: __mmask8, a: __m512i) -> i64 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by multiplication. Returns the product of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_ps&expand=4606)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_ps&expand=4606)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_mul_ps(a: __m512) -> f32 {
@@ -29230,7 +29230,7 @@ pub unsafe fn _mm512_reduce_mul_ps(a: __m512) -> f32 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by multiplication using mask k. Returns the product of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_mul_ps&expand=4605)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_ps&expand=4605)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 {
@@ -29243,7 +29243,7 @@ pub unsafe fn _mm512_mask_reduce_mul_ps(k: __mmask16, a: __m512) -> f32 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by multiplication. Returns the product of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_pd&expand=4604)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_mul_pd&expand=4604)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_mul_pd(a: __m512d) -> f64 {
@@ -29252,7 +29252,7 @@ pub unsafe fn _mm512_reduce_mul_pd(a: __m512d) -> f64 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by multiplication using mask k. Returns the product of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_mul_pd&expand=4603)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_mul_pd&expand=4603)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> f64 {
@@ -29265,7 +29265,7 @@ pub unsafe fn _mm512_mask_reduce_mul_pd(k: __mmask8, a: __m512d) -> f64 {
/// Reduce the packed signed 32-bit integers in a by maximum. Returns the maximum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_epi32&expand=4576)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epi32&expand=4576)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_max_epi32(a: __m512i) -> i32 {
@@ -29274,7 +29274,7 @@ pub unsafe fn _mm512_reduce_max_epi32(a: __m512i) -> i32 {
/// Reduce the packed signed 32-bit integers in a by maximum using mask k. Returns the maximum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_max_epi32&expand=4575)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epi32&expand=4575)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 {
@@ -29287,7 +29287,7 @@ pub unsafe fn _mm512_mask_reduce_max_epi32(k: __mmask16, a: __m512i) -> i32 {
/// Reduce the packed signed 64-bit integers in a by maximum. Returns the maximum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_epi64&expand=4578)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epi64&expand=4578)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_max_epi64(a: __m512i) -> i64 {
@@ -29296,7 +29296,7 @@ pub unsafe fn _mm512_reduce_max_epi64(a: __m512i) -> i64 {
/// Reduce the packed signed 64-bit integers in a by maximum using mask k. Returns the maximum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_max_epi64&expand=4577)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epi64&expand=4577)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 {
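
The masked maximum considers only the active lanes. A short check, same assumptions as the earlier sketches:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set_epi64(7, 6, 5, 4, 3, 2, 1, 0); // e7..e0
    // Restrict the reduction to the low four lanes (values 0..=3).
    assert_eq!(_mm512_mask_reduce_max_epi64(0b0000_1111, a), 3);
    assert_eq!(_mm512_reduce_max_epi64(a), 7);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```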
@@ -29309,7 +29309,7 @@ pub unsafe fn _mm512_mask_reduce_max_epi64(k: __mmask8, a: __m512i) -> i64 {
/// Reduce the packed unsigned 32-bit integers in a by maximum. Returns the maximum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_epu32&expand=4580)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epu32&expand=4580)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_max_epu32(a: __m512i) -> u32 {
@@ -29318,7 +29318,7 @@ pub unsafe fn _mm512_reduce_max_epu32(a: __m512i) -> u32 {
/// Reduce the packed unsigned 32-bit integers in a by maximum using mask k. Returns the maximum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_max_epu32&expand=4579)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epu32&expand=4579)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 {
@@ -29331,7 +29331,7 @@ pub unsafe fn _mm512_mask_reduce_max_epu32(k: __mmask16, a: __m512i) -> u32 {
/// Reduce the packed unsigned 64-bit integers in a by maximum. Returns the maximum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_epu64&expand=4582)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_epu64&expand=4582)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_max_epu64(a: __m512i) -> u64 {
@@ -29340,7 +29340,7 @@ pub unsafe fn _mm512_reduce_max_epu64(a: __m512i) -> u64 {
/// Reduce the packed unsigned 64-bit integers in a by maximum using mask k. Returns the maximum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_max_epu64&expand=4581)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_epu64&expand=4581)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_max_epu64(k: __mmask8, a: __m512i) -> u64 {
@@ -29353,7 +29353,7 @@ pub unsafe fn _mm512_mask_reduce_max_epu64(k: __mmask8, a: __m512i) -> u64 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by maximum. Returns the maximum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_ps&expand=4586)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_ps&expand=4586)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_max_ps(a: __m512) -> f32 {
@@ -29362,7 +29362,7 @@ pub unsafe fn _mm512_reduce_max_ps(a: __m512) -> f32 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by maximum using mask k. Returns the maximum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_max_ps&expand=4585)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_ps&expand=4585)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_max_ps(k: __mmask16, a: __m512) -> f32 {
@@ -29375,7 +29375,7 @@ pub unsafe fn _mm512_mask_reduce_max_ps(k: __mmask16, a: __m512) -> f32 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by maximum. Returns the maximum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_pd&expand=4584)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_max_pd&expand=4584)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_max_pd(a: __m512d) -> f64 {
@@ -29384,7 +29384,7 @@ pub unsafe fn _mm512_reduce_max_pd(a: __m512d) -> f64 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by maximum using mask k. Returns the maximum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_max_pd&expand=4583)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_max_pd&expand=4583)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_max_pd(k: __mmask8, a: __m512d) -> f64 {
@@ -29397,7 +29397,7 @@ pub unsafe fn _mm512_mask_reduce_max_pd(k: __mmask8, a: __m512d) -> f64 {
/// Reduce the packed signed 32-bit integers in a by minimum. Returns the minimum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_epi32&expand=4588)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epi32&expand=4588)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_min_epi32(a: __m512i) -> i32 {
@@ -29406,7 +29406,7 @@ pub unsafe fn _mm512_reduce_min_epi32(a: __m512i) -> i32 {
/// Reduce the packed signed 32-bit integers in a by minimum using mask k. Returns the minimum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_min_epi32&expand=4587)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epi32&expand=4587)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 {
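
Symmetrically, the masked minimum ignores inactive lanes. A sketch under the same assumptions:

```rust
#![feature(stdsimd)] // nightly gate for the unstable AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, -9);
    // Lane 0 holds -9; exclude it and the minimum comes from lane 1.
    assert_eq!(_mm512_mask_reduce_min_epi32(0b1111_1111_1111_1110, a), 1);
    assert_eq!(_mm512_reduce_min_epi32(a), -9);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}
```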
@@ -29419,7 +29419,7 @@ pub unsafe fn _mm512_mask_reduce_min_epi32(k: __mmask16, a: __m512i) -> i32 {
/// Reduce the packed signed 64-bit integers in a by minimum. Returns the minimum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_epi64&expand=4590)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epi64&expand=4590)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_min_epi64(a: __m512i) -> i64 {
@@ -29428,7 +29428,7 @@ pub unsafe fn _mm512_reduce_min_epi64(a: __m512i) -> i64 {
/// Reduce the packed signed 64-bit integers in a by minimum using mask k. Returns the minimum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_min_epi64&expand=4589)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epi64&expand=4589)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 {
@@ -29441,7 +29441,7 @@ pub unsafe fn _mm512_mask_reduce_min_epi64(k: __mmask8, a: __m512i) -> i64 {
/// Reduce the packed unsigned 32-bit integers in a by minimum. Returns the minimum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_epu32&expand=4592)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epu32&expand=4592)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_min_epu32(a: __m512i) -> u32 {
@@ -29450,7 +29450,7 @@ pub unsafe fn _mm512_reduce_min_epu32(a: __m512i) -> u32 {
/// Reduce the packed unsigned 32-bit integers in a by minimum using mask k. Returns the minimum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_min_epu32&expand=4591)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epu32&expand=4591)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 {
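A quick sanity check of the mask semantics, as a sketch under the same assumptions: with every mask bit set, the masked reduction should agree with the unmasked one.

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn demo_min_epu32() {
        let v = _mm512_set1_epi32(7); // broadcast 7 into all 16 lanes
        let plain = _mm512_reduce_min_epu32(v);
        let masked = _mm512_mask_reduce_min_epu32(0xFFFF, v); // all lanes active
        assert_eq!(plain, masked);
    }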
@@ -29463,7 +29463,7 @@ pub unsafe fn _mm512_mask_reduce_min_epu32(k: __mmask16, a: __m512i) -> u32 {
/// Reduce the packed unsigned 64-bit integers in a by minimum. Returns the minimum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_epu64&expand=4594)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_epu64&expand=4594)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_min_epu64(a: __m512i) -> u64 {
@@ -29472,7 +29472,7 @@ pub unsafe fn _mm512_reduce_min_epu64(a: __m512i) -> u64 {
/// Reduce the packed unsigned 64-bit integers in a by minimum using mask k. Returns the minimum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_min_epi64&expand=4589)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_epu64&expand=4593)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_min_epu64(k: __mmask8, a: __m512i) -> u64 {
@@ -29485,7 +29485,7 @@ pub unsafe fn _mm512_mask_reduce_min_epu64(k: __mmask8, a: __m512i) -> u64 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by minimum. Returns the minimum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_ps&expand=4598)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_ps&expand=4598)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_min_ps(a: __m512) -> f32 {
@@ -29494,7 +29494,7 @@ pub unsafe fn _mm512_reduce_min_ps(a: __m512) -> f32 {
/// Reduce the packed single-precision (32-bit) floating-point elements in a by minimum using mask k. Returns the minimum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_min_ps&expand=4597)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_ps&expand=4597)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_min_ps(k: __mmask16, a: __m512) -> f32 {
@@ -29507,7 +29507,7 @@ pub unsafe fn _mm512_mask_reduce_min_ps(k: __mmask16, a: __m512) -> f32 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by minimum. Returns the minimum of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_pd&expand=4596)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_min_pd&expand=4596)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_min_pd(a: __m512d) -> f64 {
@@ -29516,7 +29516,7 @@ pub unsafe fn _mm512_reduce_min_pd(a: __m512d) -> f64 {
/// Reduce the packed double-precision (64-bit) floating-point elements in a by minimum using mask k. Returns the minimum of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_min_pd&expand=4595)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_min_pd&expand=4595)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_min_pd(k: __mmask8, a: __m512d) -> f64 {
@@ -29529,7 +29529,7 @@ pub unsafe fn _mm512_mask_reduce_min_pd(k: __mmask8, a: __m512d) -> f64 {
/// Reduce the packed 32-bit integers in a by bitwise AND. Returns the bitwise AND of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_and_epi32&expand=4564)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_and_epi32&expand=4564)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_and_epi32(a: __m512i) -> i32 {
@@ -29538,7 +29538,7 @@ pub unsafe fn _mm512_reduce_and_epi32(a: __m512i) -> i32 {
/// Reduce the packed 32-bit integers in a by bitwise AND using mask k. Returns the bitwise AND of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_and_epi32&expand=4563)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_and_epi32&expand=4563)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 {
@@ -29551,7 +29551,7 @@ pub unsafe fn _mm512_mask_reduce_and_epi32(k: __mmask16, a: __m512i) -> i32 {
/// Reduce the packed 64-bit integers in a by bitwise AND. Returns the bitwise AND of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_and_epi64&expand=4566)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_and_epi64&expand=4566)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_and_epi64(a: __m512i) -> i64 {
@@ -29560,7 +29560,7 @@ pub unsafe fn _mm512_reduce_and_epi64(a: __m512i) -> i64 {
/// Reduce the packed 64-bit integers in a by bitwise AND using mask k. Returns the bitwise AND of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_add_epi64&expand=4557)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_and_epi64&expand=4565)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 {
@@ -29574,7 +29574,7 @@ pub unsafe fn _mm512_mask_reduce_and_epi64(k: __mmask8, a: __m512i) -> i64 {
/// Reduce the packed 32-bit integers in a by bitwise OR. Returns the bitwise OR of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_or_epi32&expand=4608)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_or_epi32&expand=4608)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_or_epi32(a: __m512i) -> i32 {
@@ -29583,7 +29583,7 @@ pub unsafe fn _mm512_reduce_or_epi32(a: __m512i) -> i32 {
/// Reduce the packed 32-bit integers in a by bitwise OR using mask k. Returns the bitwise OR of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_or_epi32&expand=4607)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_or_epi32&expand=4607)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 {
@@ -29596,7 +29596,7 @@ pub unsafe fn _mm512_mask_reduce_or_epi32(k: __mmask16, a: __m512i) -> i32 {
/// Reduce the packed 64-bit integers in a by bitwise OR. Returns the bitwise OR of all elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_or_epi64&expand=4610)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_reduce_or_epi64&expand=4610)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_reduce_or_epi64(a: __m512i) -> i64 {
@@ -29605,7 +29605,7 @@ pub unsafe fn _mm512_reduce_or_epi64(a: __m512i) -> i64 {
/// Reduce the packed 64-bit integers in a by bitwise OR using mask k. Returns the bitwise OR of all active elements in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_or_epi64&expand=4609)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_reduce_or_epi64&expand=4609)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 {
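For the bitwise reductions, masked-off lanes are expected to fold in as the operation's identity element (all ones for AND, zero for OR); a minimal sketch, with that lane behavior stated as an assumption to verify against the Intel guide:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn demo_bitwise_reduce() {
        let v = _mm512_set1_epi64(0b1010);
        // Only lanes 0 and 1 are active; the other six lanes are assumed
        // to contribute the identity and so leave the result unchanged.
        let and_r = _mm512_mask_reduce_and_epi64(0b0000_0011, v);
        let or_r = _mm512_mask_reduce_or_epi64(0b0000_0011, v);
        assert_eq!((and_r, or_r), (0b1010, 0b1010));
    }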
@@ -29618,7 +29618,7 @@ pub unsafe fn _mm512_mask_reduce_or_epi64(k: __mmask8, a: __m512i) -> i64 {
/// Returns vector of type `__m512d` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_pd)
#[inline]
#[target_feature(enable = "avx512f")]
// This intrinsic has no corresponding instruction.
@@ -29628,7 +29628,7 @@ pub unsafe fn _mm512_undefined_pd() -> __m512d {
/// Returns vector of type `__m512` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_ps)
#[inline]
#[target_feature(enable = "avx512f")]
// This intrinsic has no corresponding instruction.
@@ -29638,7 +29638,7 @@ pub unsafe fn _mm512_undefined_ps() -> __m512 {
/// Returns vector of type `__m512i` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined_epi32&expand=5995)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined_epi32&expand=5995)
#[inline]
#[target_feature(enable = "avx512f")]
// This intrinsic has no corresponding instruction.
@@ -29648,7 +29648,7 @@ pub unsafe fn _mm512_undefined_epi32() -> __m512i {
/// Returns vector of type `__m512` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined&expand=5994)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_undefined&expand=5994)
#[inline]
#[target_feature(enable = "avx512f")]
// This intrinsic has no corresponding instruction.
@@ -29658,7 +29658,7 @@ pub unsafe fn _mm512_undefined() -> __m512 {
/// Load 512-bits (composed of 16 packed 32-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_epi32&expand=3377)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi32&expand=3377)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -29668,7 +29668,7 @@ pub unsafe fn _mm512_loadu_epi32(mem_addr: *const i32) -> __m512i {
/// Load 256-bits (composed of 8 packed 32-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_epi32&expand=3374)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi32&expand=3374)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -29678,7 +29678,7 @@ pub unsafe fn _mm256_loadu_epi32(mem_addr: *const i32) -> __m256i {
/// Load 128-bits (composed of 4 packed 32-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_epi32&expand=3371)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi32&expand=3371)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
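Together with the matching unaligned stores documented further below, these loads give a straightforward 16-lane copy; a sketch (helper name illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn copy_sixteen_i32(src: &[i32; 16], dst: &mut [i32; 16]) {
        // Neither pointer needs any particular alignment.
        let v = _mm512_loadu_epi32(src.as_ptr());
        _mm512_storeu_epi32(dst.as_mut_ptr(), v);
    }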
@@ -29688,7 +29688,7 @@ pub unsafe fn _mm_loadu_epi32(mem_addr: *const i32) -> __m128i {
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_storeu_epi16&expand=1460)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_storeu_epi16&expand=1460)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdw))]
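The writemask makes this a partial store: only lanes whose mask bit is set are written, each at its own position, and the remaining destination bytes are left untouched. A sketch (mask value illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn narrow_low_eight(dst: &mut [i16; 16], v: __m512i) {
        // Truncate each active 32-bit lane to 16 bits; with mask 0x00FF
        // only dst[0..8] is written.
        _mm512_mask_cvtepi32_storeu_epi16(dst.as_mut_ptr() as *mut i8, 0x00FF, v);
    }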
@@ -29698,7 +29698,7 @@ pub unsafe fn _mm512_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16,
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_storeu_epi8&expand=1462)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_storeu_epi16&expand=1459)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -29708,7 +29708,7 @@ pub unsafe fn _mm256_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
/// Convert packed 32-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_storeu_epi8&expand=1461)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_storeu_epi16&expand=1458)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdw))]
@@ -29718,7 +29718,7 @@ pub unsafe fn _mm_mask_cvtepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi32_storeu_epi16&expand=1833)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_storeu_epi16&expand=1833)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
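Unlike the plain truncating store above, the saturating variant clamps out-of-range values instead of discarding high bits; a sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn saturating_narrow(dst: &mut [i16; 16]) {
        let v = _mm512_set1_epi32(100_000); // does not fit in an i16
        _mm512_mask_cvtsepi32_storeu_epi16(dst.as_mut_ptr() as *mut i8, 0xFFFF, v);
        // Every lane of dst should now hold i16::MAX (32767) rather than
        // a truncated bit pattern.
    }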
@@ -29728,7 +29728,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask16
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi32_storeu_epi16&expand=1832)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_storeu_epi16&expand=1832)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -29738,7 +29738,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 32-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi32_storeu_epi16&expand=1831)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_storeu_epi16&expand=1831)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdw))]
@@ -29748,7 +29748,7 @@ pub unsafe fn _mm_mask_cvtsepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi32_storeu_epi16&expand=2068)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_storeu_epi16&expand=2068)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -29758,7 +29758,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask1
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi32_storeu_epi16&expand=2067)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_storeu_epi16&expand=2067)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -29768,7 +29768,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8
/// Convert packed unsigned 32-bit integers in a to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi32_storeu_epi16&expand=2066)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_storeu_epi16&expand=2066)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdw))]
@@ -29778,7 +29778,7 @@ pub unsafe fn _mm_mask_cvtusepi32_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_storeu_epi8&expand=1463)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi32_storeu_epi8&expand=1463)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -29788,7 +29788,7 @@ pub unsafe fn _mm512_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_storeu_epi8&expand=1462)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi32_storeu_epi8&expand=1462)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -29798,7 +29798,7 @@ pub unsafe fn _mm256_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a
/// Convert packed 32-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_storeu_epi8&expand=1461)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi32_storeu_epi8&expand=1461)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovdb))]
@@ -29808,7 +29808,7 @@ pub unsafe fn _mm_mask_cvtepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi32_storeu_epi8&expand=1836)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi32_storeu_epi8&expand=1836)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -29818,7 +29818,7 @@ pub unsafe fn _mm512_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi32_storeu_epi8&expand=1835)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi32_storeu_epi8&expand=1835)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -29828,7 +29828,7 @@ pub unsafe fn _mm256_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 32-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi32_storeu_epi8&expand=1834)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi32_storeu_epi8&expand=1834)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsdb))]
@@ -29838,7 +29838,7 @@ pub unsafe fn _mm_mask_cvtsepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed unsigned 32-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi32_storeu_epi8&expand=2071)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi32_storeu_epi8&expand=2071)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -29848,7 +29848,7 @@ pub unsafe fn _mm512_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask16
/// Convert packed unsigned 32-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi32_storeu_epi8&expand=2070)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi32_storeu_epi8&expand=2070)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -29858,7 +29858,7 @@ pub unsafe fn _mm256_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
/// Convert packed unsigned 32-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi32_storeu_epi8&expand=2069)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi32_storeu_epi8&expand=2069)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusdb))]
@@ -29868,7 +29868,7 @@ pub unsafe fn _mm_mask_cvtusepi32_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_storeu_epi16&expand=1513)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_storeu_epi16&expand=1513)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -29878,7 +29878,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_storeu_epi16&expand=1512)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_storeu_epi16&expand=1512)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -29888,7 +29888,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
/// Convert packed 64-bit integers in a to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_storeu_epi16&expand=1511)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_storeu_epi16&expand=1511)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqw))]
@@ -29898,7 +29898,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi64_storeu_epi16&expand=1866)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_storeu_epi16&expand=1866)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -29908,7 +29908,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi64_storeu_epi16&expand=1865)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_storeu_epi16&expand=1865)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -29918,7 +29918,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 64-bit integers in a to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi64_storeu_epi16&expand=1864)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_storeu_epi16&expand=1864)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqw))]
@@ -29928,7 +29928,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed unsigned 64-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi64_storeu_epi16&expand=2101)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_storeu_epi16&expand=2101)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -29938,7 +29938,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8
/// Convert packed unsigned 64-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi64_storeu_epi16&expand=2100)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_storeu_epi16&expand=2100)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -29948,7 +29948,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8
/// Convert packed unsigned 64-bit integers in a to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi64_storeu_epi16&expand=2099)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_storeu_epi16&expand=2099)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqw))]
@@ -29958,7 +29958,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi16(mem_addr: *mut i8, k: __mmask8, a
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_storeu_epi8&expand=1519)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_storeu_epi8&expand=1519)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -29968,7 +29968,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_storeu_epi8&expand=1518)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_storeu_epi8&expand=1518)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -29978,7 +29978,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a
/// Convert packed 64-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_storeu_epi8&expand=1517)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_storeu_epi8&expand=1517)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqb))]
@@ -29988,7 +29988,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: _
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi64_storeu_epi8&expand=1872)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_storeu_epi8&expand=1872)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -29998,7 +29998,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi64_storeu_epi8&expand=1871)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_storeu_epi8&expand=1871)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -30008,7 +30008,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 64-bit integers in a to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi64_storeu_epi8&expand=1870)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_storeu_epi8&expand=1870)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqb))]
@@ -30018,7 +30018,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed unsigned 64-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi64_storeu_epi8&expand=2107)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_storeu_epi8&expand=2107)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -30028,7 +30028,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
/// Convert packed unsigned 64-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi64_storeu_epi8&expand=2106)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_storeu_epi8&expand=2106)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -30038,7 +30038,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8,
/// Convert packed unsigned 64-bit integers in a to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi64_storeu_epi8&expand=2105)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_storeu_epi8&expand=2105)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqb))]
@@ -30048,7 +30048,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_storeu_epi32&expand=1516)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtepi64_storeu_epi32&expand=1516)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -30058,7 +30058,7 @@ pub unsafe fn _mm512_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_storeu_epi32&expand=1515)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtepi64_storeu_epi32&expand=1515)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -30068,7 +30068,7 @@ pub unsafe fn _mm256_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
/// Convert packed 64-bit integers in a to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_storeu_epi32&expand=1514)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtepi64_storeu_epi32&expand=1514)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovqd))]
@@ -30078,7 +30078,7 @@ pub unsafe fn _mm_mask_cvtepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtsepi64_storeu_epi32&expand=1869)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtsepi64_storeu_epi32&expand=1869)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -30088,7 +30088,7 @@ pub unsafe fn _mm512_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtsepi64_storeu_epi32&expand=1868)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtsepi64_storeu_epi32&expand=1868)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -30098,7 +30098,7 @@ pub unsafe fn _mm256_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8,
/// Convert packed signed 64-bit integers in a to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsepi64_storeu_epi32&expand=1867)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtsepi64_storeu_epi32&expand=1867)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovsqd))]
@@ -30108,7 +30108,7 @@ pub unsafe fn _mm_mask_cvtsepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a:
/// Convert packed unsigned 64-bit integers in a to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtusepi64_storeu_epi32&expand=2104)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_cvtusepi64_storeu_epi32&expand=2104)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -30118,7 +30118,7 @@ pub unsafe fn _mm512_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8
/// Convert packed unsigned 64-bit integers in a to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtusepi64_storeu_epi32&expand=2103)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_cvtusepi64_storeu_epi32&expand=2103)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -30128,7 +30128,7 @@ pub unsafe fn _mm256_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8
/// Convert packed unsigned 64-bit integers in a to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtusepi64_storeu_epi32&expand=2102)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_cvtusepi64_storeu_epi32&expand=2102)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vpmovusqd))]
@@ -30138,7 +30138,7 @@ pub unsafe fn _mm_mask_cvtusepi64_storeu_epi32(mem_addr: *mut i8, k: __mmask8, a
/// Store 512-bits (composed of 16 packed 32-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_epi32&expand=5628)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi32&expand=5628)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -30148,7 +30148,7 @@ pub unsafe fn _mm512_storeu_epi32(mem_addr: *mut i32, a: __m512i) {
/// Store 256-bits (composed of 8 packed 32-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_epi32&expand=5626)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi32&expand=5626)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -30158,7 +30158,7 @@ pub unsafe fn _mm256_storeu_epi32(mem_addr: *mut i32, a: __m256i) {
/// Store 128-bits (composed of 4 packed 32-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_epi32&expand=5624)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi32&expand=5624)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -30168,7 +30168,7 @@ pub unsafe fn _mm_storeu_epi32(mem_addr: *mut i32, a: __m128i) {
/// Load 512-bits (composed of 8 packed 64-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_epi64&expand=3386)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_epi64&expand=3386)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64
@@ -30178,7 +30178,7 @@ pub unsafe fn _mm512_loadu_epi64(mem_addr: *const i64) -> __m512i {
/// Load 256-bits (composed of 4 packed 64-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_epi64&expand=3383)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_loadu_epi64&expand=3383)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64
@@ -30188,7 +30188,7 @@ pub unsafe fn _mm256_loadu_epi64(mem_addr: *const i64) -> __m256i {
/// Load 128-bits (composed of 2 packed 64-bit integers) from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_epi64&expand=3380)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_epi64&expand=3380)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64
@@ -30198,7 +30198,7 @@ pub unsafe fn _mm_loadu_epi64(mem_addr: *const i64) -> __m128i {
/// Store 512-bits (composed of 8 packed 64-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_epi64&expand=5634)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_epi64&expand=5634)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64
@@ -30208,7 +30208,7 @@ pub unsafe fn _mm512_storeu_epi64(mem_addr: *mut i64, a: __m512i) {
/// Store 256-bits (composed of 4 packed 64-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_epi64&expand=5632)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_storeu_epi64&expand=5632)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64
@@ -30218,7 +30218,7 @@ pub unsafe fn _mm256_storeu_epi64(mem_addr: *mut i64, a: __m256i) {
/// Store 128-bits (composed of 2 packed 64-bit integers) from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_epi64&expand=5630)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_epi64&expand=5630)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu64
@@ -30228,7 +30228,7 @@ pub unsafe fn _mm_storeu_epi64(mem_addr: *mut i64, a: __m128i) {
/// Load 512-bits of integer data from memory into dst. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_si512&expand=3420)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_si512&expand=3420)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -30238,7 +30238,7 @@ pub unsafe fn _mm512_loadu_si512(mem_addr: *const i32) -> __m512i {
/// Store 512-bits of integer data from a into memory. mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_si512&expand=5657)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_si512&expand=5657)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))] //should be vmovdqu32
@@ -30250,7 +30250,7 @@ pub unsafe fn _mm512_storeu_si512(mem_addr: *mut i32, a: __m512i) {
/// floating-point elements) from memory into result.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))]
@@ -30262,7 +30262,7 @@ pub unsafe fn _mm512_loadu_pd(mem_addr: *const f64) -> __m512d {
/// floating-point elements) from `a` into memory.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))]
@@ -30274,7 +30274,7 @@ pub unsafe fn _mm512_storeu_pd(mem_addr: *mut f64, a: __m512d) {
/// floating-point elements) from memory into result.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))]
@@ -30286,7 +30286,7 @@ pub unsafe fn _mm512_loadu_ps(mem_addr: *const f32) -> __m512 {
/// floating-point elements) from `a` into memory.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_storeu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovups))]
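A typical load-compute-store round trip over an unaligned f32 buffer, as a sketch (scale factor and helper name illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn double_in_place(buf: &mut [f32; 16]) {
        let v = _mm512_loadu_ps(buf.as_ptr());
        let doubled = _mm512_mul_ps(v, _mm512_set1_ps(2.0));
        _mm512_storeu_ps(buf.as_mut_ptr(), doubled);
    }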
@@ -30297,7 +30297,7 @@ pub unsafe fn _mm512_storeu_ps(mem_addr: *mut f32, a: __m512) {
/// Load 512-bits of integer data from memory into dst. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_load_si512&expand=3345)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_si512&expand=3345)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30307,7 +30307,7 @@ pub unsafe fn _mm512_load_si512(mem_addr: *const i32) -> __m512i {
/// Store 512-bits of integer data from a into memory. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_store_si512&expand=5598)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_si512&expand=5598)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
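Because the aligned variants may fault on a misaligned mem_addr, the 64-byte alignment has to be guaranteed by the caller; one way is a repr(align) wrapper. A sketch (wrapper type illustrative, pointer types as in this file):

    use core::arch::x86_64::*;

    #[repr(align(64))]
    struct Aligned64([i32; 16]);

    #[target_feature(enable = "avx512f")]
    unsafe fn zero_aligned(buf: &mut Aligned64) {
        let v = _mm512_load_si512(buf.0.as_ptr());
        let zeroed = _mm512_xor_si512(v, v); // x ^ x == 0 in every lane
        _mm512_store_si512(buf.0.as_mut_ptr(), zeroed);
    }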
@@ -30317,7 +30317,7 @@ pub unsafe fn _mm512_store_si512(mem_addr: *mut i32, a: __m512i) {
/// Load 512-bits (composed of 16 packed 32-bit integers) from memory into dst. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_load_epi32&expand=3304)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_epi32&expand=3304)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30327,7 +30327,7 @@ pub unsafe fn _mm512_load_epi32(mem_addr: *const i32) -> __m512i {
/// Load 256-bits (composed of 8 packed 32-bit integers) from memory into dst. mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_epi32&expand=3301)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_epi32&expand=3301)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30337,7 +30337,7 @@ pub unsafe fn _mm256_load_epi32(mem_addr: *const i32) -> __m256i {
/// Load 128-bits (composed of 4 packed 32-bit integers) from memory into dst. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_epi32&expand=3298)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_epi32&expand=3298)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30347,7 +30347,7 @@ pub unsafe fn _mm_load_epi32(mem_addr: *const i32) -> __m128i {
/// Store 512-bits (composed of 16 packed 32-bit integers) from a into memory. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=512_store_epi32&expand=5569)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_epi32&expand=5569)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30357,7 +30357,7 @@ pub unsafe fn _mm512_store_epi32(mem_addr: *mut i32, a: __m512i) {
/// Store 256-bits (composed of 8 packed 32-bit integers) from a into memory. mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_epi32&expand=5567)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_epi32&expand=5567)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30367,7 +30367,7 @@ pub unsafe fn _mm256_store_epi32(mem_addr: *mut i32, a: __m256i) {
/// Store 128-bits (composed of 4 packed 32-bit integers) from a into memory. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_epi32&expand=5565)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_epi32&expand=5565)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa32
@@ -30377,7 +30377,7 @@ pub unsafe fn _mm_store_epi32(mem_addr: *mut i32, a: __m128i) {
/// Load 512-bits (composed of 8 packed 64-bit integers) from memory into dst. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_load_epi64&expand=3313)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_epi64&expand=3313)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64
@@ -30387,7 +30387,7 @@ pub unsafe fn _mm512_load_epi64(mem_addr: *const i64) -> __m512i {
/// Load 256-bits (composed of 4 packed 64-bit integers) from memory into dst. mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_epi64&expand=3310)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_load_epi64&expand=3310)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64
@@ -30397,7 +30397,7 @@ pub unsafe fn _mm256_load_epi64(mem_addr: *const i64) -> __m256i {
/// Load 128-bits (composed of 2 packed 64-bit integers) from memory into dst. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_epi64&expand=3307)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_epi64&expand=3307)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64
@@ -30407,7 +30407,7 @@ pub unsafe fn _mm_load_epi64(mem_addr: *const i64) -> __m128i {
/// Store 512-bits (composed of 8 packed 64-bit integers) from a into memory. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_store_epi64&expand=5575)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_epi64&expand=5575)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64
@@ -30417,7 +30417,7 @@ pub unsafe fn _mm512_store_epi64(mem_addr: *mut i64, a: __m512i) {
/// Store 256-bits (composed of 4 packed 64-bit integers) from a into memory. mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_epi64&expand=5573)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_store_epi64&expand=5573)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64
@@ -30427,7 +30427,7 @@ pub unsafe fn _mm256_store_epi64(mem_addr: *mut i64, a: __m256i) {
/// Store 128-bits (composed of 2 packed 64-bit integers) from a into memory. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_epi64&expand=5571)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_epi64&expand=5571)
#[inline]
#[target_feature(enable = "avx512f,avx512vl")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovdqa64
@@ -30437,7 +30437,7 @@ pub unsafe fn _mm_store_epi64(mem_addr: *mut i64, a: __m128i) {
/// Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into dst. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_load_ps&expand=3336)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_ps&expand=3336)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -30447,7 +30447,7 @@ pub unsafe fn _mm512_load_ps(mem_addr: *const f32) -> __m512 {
/// Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from a into memory. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_store_ps&expand=5592)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_ps&expand=5592)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))]
@@ -30457,7 +30457,7 @@ pub unsafe fn _mm512_store_ps(mem_addr: *mut f32, a: __m512) {
/// Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into dst. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_load_pd&expand=3326)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_load_pd&expand=3326)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovapd
@@ -30467,7 +30467,7 @@ pub unsafe fn _mm512_load_pd(mem_addr: *const f64) -> __m512d {
/// Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from a into memory. mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_store_pd&expand=5585)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_store_pd&expand=5585)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovaps))] //should be vmovapd
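
The aligned `load`/`store` variants above fault unless `mem_addr` sits on the stated boundary. One idiomatic way to guarantee the 64-byte requirement is an over-aligned wrapper type; a sketch under the same assumptions as above (the wrapper is ours, not from this diff):

use core::arch::x86_64::*;

// Hypothetical 64-byte-aligned backing store for one __m512i of i32 lanes.
#[repr(C, align(64))]
struct Aligned64([i32; 16]);

#[target_feature(enable = "avx512f")]
unsafe fn copy_aligned(src: &Aligned64, dst: &mut Aligned64) {
    // repr(align(64)) satisfies the boundary that _mm512_load_epi32 and
    // _mm512_store_epi32 demand, so the aligned forms can be used here.
    let v = _mm512_load_epi32(src.0.as_ptr());
    _mm512_store_epi32(dst.0.as_mut_ptr(), v);
}
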
@@ -30479,7 +30479,7 @@ pub unsafe fn _mm512_store_pd(mem_addr: *mut f64, a: __m512d) {
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i {
@@ -30498,7 +30498,7 @@ pub unsafe fn _mm512_mask_loadu_epi32(src: __m512i, k: __mmask16, mem_addr: *con
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i {
@@ -30517,7 +30517,7 @@ pub unsafe fn _mm512_maskz_loadu_epi32(k: __mmask16, mem_addr: *const i32) -> __
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i {
@@ -30536,7 +30536,7 @@ pub unsafe fn _mm512_mask_loadu_epi64(src: __m512i, k: __mmask8, mem_addr: *cons
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i {
@@ -30555,7 +30555,7 @@ pub unsafe fn _mm512_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 {
@@ -30574,7 +30574,7 @@ pub unsafe fn _mm512_mask_loadu_ps(src: __m512, k: __mmask16, mem_addr: *const f
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 {
@@ -30593,7 +30593,7 @@ pub unsafe fn _mm512_maskz_loadu_ps(k: __mmask16, mem_addr: *const f32) -> __m51
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d {
@@ -30612,7 +30612,7 @@ pub unsafe fn _mm512_mask_loadu_pd(src: __m512d, k: __mmask8, mem_addr: *const f
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d {
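
The only difference between the `mask_` (writemask) and `maskz_` (zeromask) loads documented above is what ends up in the inactive lanes: the old `src` values or zeros. A small illustrative sketch, same assumptions as earlier:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn masked_loads(mem: &[i32; 16]) -> ([i32; 16], [i32; 16]) {
    let src = _mm512_set1_epi32(-1); // fallback for inactive lanes
    let k: __mmask16 = 0b0000_0000_1111_1111; // low 8 lanes active
    let merged = _mm512_mask_loadu_epi32(src, k, mem.as_ptr()); // keeps src
    let zeroed = _mm512_maskz_loadu_epi32(k, mem.as_ptr()); // zeroes instead
    let (mut a, mut b) = ([0i32; 16], [0i32; 16]);
    _mm512_storeu_epi32(a.as_mut_ptr(), merged);
    _mm512_storeu_epi32(b.as_mut_ptr(), zeroed);
    (a, b) // upper 8 lanes: -1 in `a`, 0 in `b`
}
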
@@ -30631,7 +30631,7 @@ pub unsafe fn _mm512_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i {
@@ -30650,7 +30650,7 @@ pub unsafe fn _mm256_mask_loadu_epi32(src: __m256i, k: __mmask8, mem_addr: *cons
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i {
@@ -30669,7 +30669,7 @@ pub unsafe fn _mm256_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i {
@@ -30688,7 +30688,7 @@ pub unsafe fn _mm256_mask_loadu_epi64(src: __m256i, k: __mmask8, mem_addr: *cons
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i {
@@ -30707,7 +30707,7 @@ pub unsafe fn _mm256_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 {
@@ -30726,7 +30726,7 @@ pub unsafe fn _mm256_mask_loadu_ps(src: __m256, k: __mmask8, mem_addr: *const f3
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 {
@@ -30745,7 +30745,7 @@ pub unsafe fn _mm256_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d {
@@ -30764,7 +30764,7 @@ pub unsafe fn _mm256_mask_loadu_pd(src: __m256d, k: __mmask8, mem_addr: *const f
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d {
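
A common use for the narrower masked variants gated on `avx512vl` is tail handling: masked-off elements neither load nor fault, so a mask derived from the remainder length lets the final partial vector of a slice be read without touching memory past its end. A hedged sketch (helper name and shape are ours):

use core::arch::x86_64::*;

// Load up to 8 i32s from `tail` into a zero-padded __m256i.
#[target_feature(enable = "avx512f,avx512vl,avx")]
unsafe fn load_tail(tail: &[i32]) -> __m256i {
    debug_assert!(tail.len() <= 8);
    // One mask bit per lane; lanes past tail.len() stay inactive, so the
    // load never reads beyond the slice.
    let k = ((1u16 << tail.len()) - 1) as __mmask8;
    _mm256_maskz_loadu_epi32(k, tail.as_ptr())
}
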
@@ -30783,7 +30783,7 @@ pub unsafe fn _mm256_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i {
@@ -30802,7 +30802,7 @@ pub unsafe fn _mm_mask_loadu_epi32(src: __m128i, k: __mmask8, mem_addr: *const i
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i {
@@ -30821,7 +30821,7 @@ pub unsafe fn _mm_maskz_loadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i {
@@ -30840,7 +30840,7 @@ pub unsafe fn _mm_mask_loadu_epi64(src: __m128i, k: __mmask8, mem_addr: *const i
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i {
@@ -30859,7 +30859,7 @@ pub unsafe fn _mm_maskz_loadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 {
@@ -30878,7 +30878,7 @@ pub unsafe fn _mm_mask_loadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32)
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 {
@@ -30897,7 +30897,7 @@ pub unsafe fn _mm_maskz_loadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 {
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d {
@@ -30916,7 +30916,7 @@ pub unsafe fn _mm_mask_loadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64)
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_loadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d {
@@ -30935,7 +30935,7 @@ pub unsafe fn _mm_maskz_loadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d {
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *const i32) -> __m512i {
@@ -30954,7 +30954,7 @@ pub unsafe fn _mm512_mask_load_epi32(src: __m512i, k: __mmask16, mem_addr: *cons
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i {
@@ -30973,7 +30973,7 @@ pub unsafe fn _mm512_maskz_load_epi32(k: __mmask16, mem_addr: *const i32) -> __m
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const i64) -> __m512i {
@@ -30992,7 +30992,7 @@ pub unsafe fn _mm512_mask_load_epi64(src: __m512i, k: __mmask8, mem_addr: *const
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i {
@@ -31011,7 +31011,7 @@ pub unsafe fn _mm512_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m5
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f32) -> __m512 {
@@ -31030,7 +31030,7 @@ pub unsafe fn _mm512_mask_load_ps(src: __m512, k: __mmask16, mem_addr: *const f3
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512 {
@@ -31049,7 +31049,7 @@ pub unsafe fn _mm512_maskz_load_ps(k: __mmask16, mem_addr: *const f32) -> __m512
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_load_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f64) -> __m512d {
@@ -31068,7 +31068,7 @@ pub unsafe fn _mm512_mask_load_pd(src: __m512d, k: __mmask8, mem_addr: *const f6
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_load_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d {
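
For the aligned masked loads above, the boundary requirement applies to `mem_addr` itself regardless of which lanes the mask enables, so the pointer must still come from suitably aligned storage. A short sketch reusing the over-aligned-wrapper idea from earlier:

use core::arch::x86_64::*;

#[repr(C, align(64))]
struct AlignedF64([f64; 8]); // hypothetical wrapper, as before

#[target_feature(enable = "avx512f")]
unsafe fn load_even_lanes(buf: &AlignedF64) -> __m512d {
    // Even lanes come from memory, odd lanes are zeroed; the wrapper
    // provides the 64-byte alignment _mm512_maskz_load_pd insists on.
    _mm512_maskz_load_pd(0b0101_0101, buf.0.as_ptr())
}
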
@@ -31087,7 +31087,7 @@ pub unsafe fn _mm512_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m512d
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const i32) -> __m256i {
@@ -31106,7 +31106,7 @@ pub unsafe fn _mm256_mask_load_epi32(src: __m256i, k: __mmask8, mem_addr: *const
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i {
@@ -31125,7 +31125,7 @@ pub unsafe fn _mm256_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m2
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const i64) -> __m256i {
@@ -31144,7 +31144,7 @@ pub unsafe fn _mm256_mask_load_epi64(src: __m256i, k: __mmask8, mem_addr: *const
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i {
@@ -31163,7 +31163,7 @@ pub unsafe fn _mm256_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m2
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 {
@@ -31182,7 +31182,7 @@ pub unsafe fn _mm256_mask_load_ps(src: __m256, k: __mmask8, mem_addr: *const f32
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256 {
@@ -31201,7 +31201,7 @@ pub unsafe fn _mm256_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m256
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_load_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f64) -> __m256d {
@@ -31220,7 +31220,7 @@ pub unsafe fn _mm256_mask_load_pd(src: __m256d, k: __mmask8, mem_addr: *const f6
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_load_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d {
@@ -31239,7 +31239,7 @@ pub unsafe fn _mm256_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m256d
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i32) -> __m128i {
@@ -31258,7 +31258,7 @@ pub unsafe fn _mm_mask_load_epi32(src: __m128i, k: __mmask8, mem_addr: *const i3
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i {
@@ -31277,7 +31277,7 @@ pub unsafe fn _mm_maskz_load_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i64) -> __m128i {
@@ -31296,7 +31296,7 @@ pub unsafe fn _mm_mask_load_epi64(src: __m128i, k: __mmask8, mem_addr: *const i6
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i {
@@ -31315,7 +31315,7 @@ pub unsafe fn _mm_maskz_load_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 {
@@ -31334,7 +31334,7 @@ pub unsafe fn _mm_mask_load_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 {
@@ -31353,7 +31353,7 @@ pub unsafe fn _mm_maskz_load_ps(k: __mmask8, mem_addr: *const f32) -> __m128 {
/// (elements are copied from src when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_load_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d {
@@ -31372,7 +31372,7 @@ pub unsafe fn _mm_mask_load_pd(src: __m128d, k: __mmask8, mem_addr: *const f64)
/// (elements are zeroed out when the corresponding mask bit is not set).
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_load_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d {
@@ -31390,7 +31390,7 @@ pub unsafe fn _mm_maskz_load_pd(k: __mmask8, mem_addr: *const f64) -> __m128d {
/// Store packed 32-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) {
@@ -31406,7 +31406,7 @@ pub unsafe fn _mm512_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask16, a: _
/// Store packed 64-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) {
@@ -31422,7 +31422,7 @@ pub unsafe fn _mm512_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __
/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) {
@@ -31438,7 +31438,7 @@ pub unsafe fn _mm512_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask16, a: __m5
/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_storeu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_storeu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) {
@@ -31454,7 +31454,7 @@ pub unsafe fn _mm512_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m51
/// Store packed 32-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) {
@@ -31470,7 +31470,7 @@ pub unsafe fn _mm256_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __
/// Store packed 64-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) {
@@ -31486,7 +31486,7 @@ pub unsafe fn _mm256_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __
/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) {
@@ -31502,7 +31502,7 @@ pub unsafe fn _mm256_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m25
/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_storeu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_storeu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) {
@@ -31518,7 +31518,7 @@ pub unsafe fn _mm256_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m25
/// Store packed 32-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) {
@@ -31534,7 +31534,7 @@ pub unsafe fn _mm_mask_storeu_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m12
/// Store packed 64-bit integers from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) {
@@ -31550,7 +31550,7 @@ pub unsafe fn _mm_mask_storeu_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m12
/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) {
@@ -31566,7 +31566,7 @@ pub unsafe fn _mm_mask_storeu_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128)
/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k.
/// mem_addr does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_storeu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_storeu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) {
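
The masked stores mirror the masked loads: only lanes whose mask bit is set are written, and suppressed lanes neither store nor fault, which makes these the store-side half of the tail pattern sketched above. Same assumptions:

use core::arch::x86_64::*;

// Store the first tail.len() lanes of `v` without writing past the slice.
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
unsafe fn store_tail(tail: &mut [i32], v: __m128i) {
    debug_assert!(tail.len() <= 4);
    let k = (1u8 << tail.len()) - 1; // active lanes = slice length
    // Memory beyond the masked lanes is left untouched.
    _mm_mask_storeu_epi32(tail.as_mut_ptr(), k, v);
}
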
@@ -31582,7 +31582,7 @@ pub unsafe fn _mm_mask_storeu_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d)
/// Store packed 32-bit integers from a into memory using writemask k.
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __m512i) {
@@ -31598,7 +31598,7 @@ pub unsafe fn _mm512_mask_store_epi32(mem_addr: *mut i32, mask: __mmask16, a: __
/// Store packed 64-bit integers from a into memory using writemask k.
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m512i) {
@@ -31614,7 +31614,7 @@ pub unsafe fn _mm512_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m
/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k.
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m512) {
@@ -31630,7 +31630,7 @@ pub unsafe fn _mm512_mask_store_ps(mem_addr: *mut f32, mask: __mmask16, a: __m51
/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k.
/// mem_addr must be aligned on a 64-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_store_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_store_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512d) {
@@ -31646,7 +31646,7 @@ pub unsafe fn _mm512_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m512
/// Store packed 32-bit integers from a into memory using writemask k.
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m256i) {
@@ -31662,7 +31662,7 @@ pub unsafe fn _mm256_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m
/// Store packed 64-bit integers from a into memory using writemask k.
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m256i) {
@@ -31678,7 +31678,7 @@ pub unsafe fn _mm256_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m
/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k.
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256) {
@@ -31694,7 +31694,7 @@ pub unsafe fn _mm256_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m256
/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k.
/// mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_store_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_store_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256d) {
@@ -31710,7 +31710,7 @@ pub unsafe fn _mm256_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m256
/// Store packed 32-bit integers from a into memory using writemask k.
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128i) {
@@ -31726,7 +31726,7 @@ pub unsafe fn _mm_mask_store_epi32(mem_addr: *mut i32, mask: __mmask8, a: __m128
/// Store packed 64-bit integers from a into memory using writemask k.
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128i) {
@@ -31742,7 +31742,7 @@ pub unsafe fn _mm_mask_store_epi64(mem_addr: *mut i64, mask: __mmask8, a: __m128
/// Store packed single-precision (32-bit) floating-point elements from a into memory using writemask k.
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) {
@@ -31758,7 +31758,7 @@ pub unsafe fn _mm_mask_store_ps(mem_addr: *mut f32, mask: __mmask8, a: __m128) {
/// Store packed double-precision (64-bit) floating-point elements from a into memory using writemask k.
/// mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_store_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d) {
@@ -31773,7 +31773,7 @@ pub unsafe fn _mm_mask_store_pd(mem_addr: *mut f64, mask: __mmask8, a: __m128d)
/// Load contiguous active 32-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expandloadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_expandloadu_epi32(
@@ -31794,7 +31794,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi32(
/// Load contiguous active 32-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expandloadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi32)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_expandloadu_epi32(k: __mmask16, mem_addr: *const i32) -> __m512i {
@@ -31811,7 +31811,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi32(k: __mmask16, mem_addr: *const i32)
/// Load contiguous active 32-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expandloadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_expandloadu_epi32(
@@ -31832,7 +31832,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi32(
/// Load contiguous active 32-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expandloadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m256i {
@@ -31849,7 +31849,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32)
/// Load contiguous active 32-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expandloadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_expandloadu_epi32(
@@ -31870,7 +31870,7 @@ pub unsafe fn _mm_mask_expandloadu_epi32(
/// Load contiguous active 32-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expandloadu_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi32)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) -> __m128i {
@@ -31887,7 +31887,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi32(k: __mmask8, mem_addr: *const i32) ->
/// Load contiguous active 64-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expandloadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_expandloadu_epi64(
@@ -31908,7 +31908,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi64(
/// Load contiguous active 64-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expandloadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi64)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m512i {
@@ -31925,7 +31925,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64)
/// Load contiguous active 64-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expandloadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_expandloadu_epi64(
@@ -31946,7 +31946,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi64(
/// Load contiguous active 64-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expandloadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m256i {
@@ -31963,7 +31963,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64)
/// Load contiguous active 64-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expandloadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_expandloadu_epi64(
@@ -31984,7 +31984,7 @@ pub unsafe fn _mm_mask_expandloadu_epi64(
/// Load contiguous active 64-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expandloadu_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi64)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) -> __m128i {
@@ -32001,7 +32001,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi64(k: __mmask8, mem_addr: *const i64) ->
/// Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expandloadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_expandloadu_ps(
@@ -32022,7 +32022,7 @@ pub unsafe fn _mm512_mask_expandloadu_ps(
/// Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expandloadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_ps)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_expandloadu_ps(k: __mmask16, mem_addr: *const f32) -> __m512 {
@@ -32039,7 +32039,7 @@ pub unsafe fn _mm512_maskz_expandloadu_ps(k: __mmask16, mem_addr: *const f32) ->
/// Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expandloadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_expandloadu_ps(src: __m256, k: __mmask8, mem_addr: *const f32) -> __m256 {
@@ -32056,7 +32056,7 @@ pub unsafe fn _mm256_mask_expandloadu_ps(src: __m256, k: __mmask8, mem_addr: *co
/// Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expandloadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m256 {
@@ -32073,7 +32073,7 @@ pub unsafe fn _mm256_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) ->
/// Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expandloadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_expandloadu_ps(src: __m128, k: __mmask8, mem_addr: *const f32) -> __m128 {
@@ -32090,7 +32090,7 @@ pub unsafe fn _mm_mask_expandloadu_ps(src: __m128, k: __mmask8, mem_addr: *const
/// Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expandloadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_ps)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m128 {
@@ -32107,7 +32107,7 @@ pub unsafe fn _mm_maskz_expandloadu_ps(k: __mmask8, mem_addr: *const f32) -> __m
/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expandloadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_mask_expandloadu_pd(
@@ -32128,7 +32128,7 @@ pub unsafe fn _mm512_mask_expandloadu_pd(
/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expandloadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_pd)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m512d {
@@ -32145,7 +32145,7 @@ pub unsafe fn _mm512_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) ->
/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expandloadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_mask_expandloadu_pd(
@@ -32166,7 +32166,7 @@ pub unsafe fn _mm256_mask_expandloadu_pd(
/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expandloadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx")]
pub unsafe fn _mm256_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m256d {
@@ -32183,7 +32183,7 @@ pub unsafe fn _mm256_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) ->
/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expandloadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_expandloadu_pd(src: __m128d, k: __mmask8, mem_addr: *const f64) -> __m128d {
@@ -32200,7 +32200,7 @@ pub unsafe fn _mm_mask_expandloadu_pd(src: __m128d, k: __mmask8, mem_addr: *cons
/// Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expandloadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_pd)
#[inline]
#[target_feature(enable = "avx512f,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m128d {
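The expandloadu family above reads consecutive values from (possibly unaligned) memory and distributes them, in order, into the active lanes only; the mask/maskz split just decides whether inactive lanes copy src or are zeroed. A sketch under the same toolchain and CPU assumptions as the earlier example:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn expand_two(mem: *const i32) -> [i32; 4] {
    // Lanes 1 and 3 are active, so the first two memory values land there in
    // order; inactive lanes are zeroed by the maskz variant.
    let v = _mm_maskz_expandloadu_epi32(0b1010, mem);
    let mut out = [0i32; 4];
    _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, v);
    out
}

fn main() {
    if is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl") {
        let mem = [10, 20, 30, 40];
        assert_eq!(unsafe { expand_two(mem.as_ptr()) }, [0, 10, 0, 20]);
    }
}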
@@ -32217,7 +32217,7 @@ pub unsafe fn _mm_maskz_expandloadu_pd(k: __mmask8, mem_addr: *const f64) -> __m
/// Set packed double-precision (64-bit) floating-point elements in dst with the supplied values in reverse order.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_pd&expand=5002)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_setr_pd&expand=5002)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_setr_pd(
@@ -32236,7 +32236,7 @@ pub unsafe fn _mm512_setr_pd(
/// Set packed double-precision (64-bit) floating-point elements in dst with the supplied values.
///
-/// [Intel's documentation]( https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_pd&expand=4924)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_set_pd&expand=4924)
#[inline]
#[target_feature(enable = "avx512f")]
pub unsafe fn _mm512_set_pd(
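_mm512_setr_pd and _mm512_set_pd differ only in argument order: setr places its first argument in element 0, set places it in element 7, so the two calls below build the same vector. A sketch, assuming avx512f is available:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn set_vs_setr() {
    let r = _mm512_setr_pd(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); // arg 0 -> element 0
    let s = _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0); // arg 0 -> element 7
    let (mut a, mut b) = ([0.0f64; 8], [0.0f64; 8]);
    _mm512_storeu_pd(a.as_mut_ptr(), r);
    _mm512_storeu_pd(b.as_mut_ptr(), s);
    assert_eq!(a, b); // both are [0.0, 1.0, ..., 7.0] in memory order
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { set_vs_setr() };
    }
}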
@@ -32254,7 +32254,7 @@ pub unsafe fn _mm512_set_pd(
/// Move the lower single-precision (32-bit) floating-point element from b to the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_move_ss&expand=3832)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_move_ss&expand=3832)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovss))]
@@ -32270,7 +32270,7 @@ pub unsafe fn _mm_mask_move_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -
/// Move the lower single-precision (32-bit) floating-point element from b to the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_move_ss&expand=3833)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_move_ss&expand=3833)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovss))]
@@ -32285,7 +32285,7 @@ pub unsafe fn _mm_maskz_move_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Move the lower double-precision (64-bit) floating-point element from b to the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_move_sd&expand=3829)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_move_sd&expand=3829)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovsd))]
@@ -32301,7 +32301,7 @@ pub unsafe fn _mm_mask_move_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d
/// Move the lower double-precision (64-bit) floating-point element from b to the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_move_sd&expand=3830)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_move_sd&expand=3830)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmovsd))]
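For these masked scalar moves only mask bit 0 matters: it selects whether the low lane comes from b (bit set) or from src (writemask) / zero (maskz), while the upper lanes are always copied from a. An illustrative sketch of _mm_mask_move_ss:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn move_demo() -> ([f32; 4], [f32; 4]) {
    let src = _mm_set1_ps(9.0);
    let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
    let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0);
    let merged = _mm_mask_move_ss(src, 0b1, a, b); // lane 0 from b
    let kept = _mm_mask_move_ss(src, 0b0, a, b); // lane 0 from src
    let (mut m, mut k) = ([0.0f32; 4], [0.0f32; 4]);
    _mm_storeu_ps(m.as_mut_ptr(), merged);
    _mm_storeu_ps(k.as_mut_ptr(), kept);
    (m, k)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let (m, k) = unsafe { move_demo() };
        assert_eq!(m, [5.0, 2.0, 3.0, 4.0]);
        assert_eq!(k, [9.0, 2.0, 3.0, 4.0]); // upper lanes from a either way
    }
}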
@@ -32316,7 +32316,7 @@ pub unsafe fn _mm_maskz_move_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d
/// Add the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_add_ss&expand=159)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_ss&expand=159)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddss))]
@@ -32334,7 +32334,7 @@ pub unsafe fn _mm_mask_add_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Add the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_add_ss&expand=160)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_add_ss&expand=160)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddss))]
@@ -32351,7 +32351,7 @@ pub unsafe fn _mm_maskz_add_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Add the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_add_sd&expand=155)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_sd&expand=155)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddsd))]
@@ -32369,7 +32369,7 @@ pub unsafe fn _mm_mask_add_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Add the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_add_sd&expand=156)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_add_sd&expand=156)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddsd))]
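The masked scalar add, like the sub/mul/div/max/min variants below that share the same (src, k, a, b) shape, computes only in lane 0 and merges according to mask bit 0. A sketch:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn masked_scalar_add(k: __mmask8) -> [f32; 4] {
    let src = _mm_set1_ps(-1.0);
    let a = _mm_setr_ps(10.0, 20.0, 30.0, 40.0);
    let b = _mm_set1_ps(0.5);
    // Lane 0 is a[0] + b[0] when bit 0 of k is set, src[0] otherwise;
    // lanes 1..3 are always copied from a.
    let r = _mm_mask_add_ss(src, k, a, b);
    let mut out = [0.0f32; 4];
    _mm_storeu_ps(out.as_mut_ptr(), r);
    out
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        assert_eq!(unsafe { masked_scalar_add(0b1) }, [10.5, 20.0, 30.0, 40.0]);
        assert_eq!(unsafe { masked_scalar_add(0b0) }, [-1.0, 20.0, 30.0, 40.0]);
    }
}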
@@ -32386,7 +32386,7 @@ pub unsafe fn _mm_maskz_add_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Subtract the lower single-precision (32-bit) floating-point element in b from the lower single-precision (32-bit) floating-point element in a, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sub_ss&expand=5750)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_ss&expand=5750)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubss))]
@@ -32404,7 +32404,7 @@ pub unsafe fn _mm_mask_sub_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Subtract the lower single-precision (32-bit) floating-point element in b from the lower single-precision (32-bit) floating-point element in a, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sub_ss&expand=5751)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_ss&expand=5751)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubss))]
@@ -32421,7 +32421,7 @@ pub unsafe fn _mm_maskz_sub_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Subtract the lower double-precision (64-bit) floating-point element in b from the lower double-precision (64-bit) floating-point element in a, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sub_sd&expand=5746)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_sd&expand=5746)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubsd))]
@@ -32439,7 +32439,7 @@ pub unsafe fn _mm_mask_sub_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Subtract the lower double-precision (64-bit) floating-point element in b from the lower double-precision (64-bit) floating-point element in a, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sub_sd&expand=5747)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_sd&expand=5747)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubsd))]
@@ -32456,7 +32456,7 @@ pub unsafe fn _mm_maskz_sub_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Multiply the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_mul_ss&expand=3950)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_ss&expand=3950)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulss))]
@@ -32474,7 +32474,7 @@ pub unsafe fn _mm_mask_mul_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Multiply the lower single-precision (32-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_mul_ss&expand=3951)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_ss&expand=3951)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulss))]
@@ -32491,7 +32491,7 @@ pub unsafe fn _mm_maskz_mul_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Multiply the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_mul_sd&expand=3947)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_sd&expand=3947)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulsd))]
@@ -32509,7 +32509,7 @@ pub unsafe fn _mm_mask_mul_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Multiply the lower double-precision (64-bit) floating-point element in a and b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_mul_sd&expand=3948)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_sd&expand=3948)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulsd))]
@@ -32526,7 +32526,7 @@ pub unsafe fn _mm_maskz_mul_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Divide the lower single-precision (32-bit) floating-point element in a by the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_div_ss&expand=2181)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_ss&expand=2181)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivss))]
@@ -32544,7 +32544,7 @@ pub unsafe fn _mm_mask_div_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Divide the lower single-precision (32-bit) floating-point element in a by the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_div_ss&expand=2182)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_ss&expand=2182)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivss))]
@@ -32561,7 +32561,7 @@ pub unsafe fn _mm_maskz_div_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Divide the lower double-precision (64-bit) floating-point element in a by the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_div_sd&expand=2178)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_sd&expand=2178)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivsd))]
@@ -32579,7 +32579,7 @@ pub unsafe fn _mm_mask_div_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Divide the lower double-precision (64-bit) floating-point element in a by the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_div_sd&expand=2179)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_sd&expand=2179)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivsd))]
@@ -32596,7 +32596,7 @@ pub unsafe fn _mm_maskz_div_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_max_ss&expand=3672)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_ss&expand=3672)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxss))]
@@ -32612,7 +32612,7 @@ pub unsafe fn _mm_mask_max_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_max_ss&expand=3673)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_ss&expand=3673)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxss))]
@@ -32628,7 +32628,7 @@ pub unsafe fn _mm_maskz_max_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_max_sd&expand=3669)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_sd&expand=3669)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxsd))]
@@ -32644,7 +32644,7 @@ pub unsafe fn _mm_mask_max_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_max_sd&expand=3670)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_sd&expand=3670)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxsd))]
@@ -32660,7 +32660,7 @@ pub unsafe fn _mm_maskz_max_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_min_ss&expand=3786)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_ss&expand=3786)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminss))]
@@ -32676,7 +32676,7 @@ pub unsafe fn _mm_mask_min_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) ->
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_min_ss&expand=3787)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_ss&expand=3787)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminss))]
@@ -32692,7 +32692,7 @@ pub unsafe fn _mm_maskz_min_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_min_sd&expand=3783)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_sd&expand=3783)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminsd))]
@@ -32708,7 +32708,7 @@ pub unsafe fn _mm_mask_min_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d)
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_min_sd&expand=3784)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_sd&expand=3784)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminsd))]
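The maskz min/max variants follow the same shape without a src operand: lane 0 holds the comparison result when bit 0 is set and zero otherwise, and the upper lanes still come from a. A sketch:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn minmax_demo() -> ([f64; 2], [f64; 2]) {
    let a = _mm_setr_pd(3.0, 111.0);
    let b = _mm_setr_pd(7.0, 222.0);
    let mx = _mm_maskz_max_sd(0b1, a, b); // lane 0 = max(3.0, 7.0)
    let mn = _mm_maskz_min_sd(0b0, a, b); // lane 0 zeroed (bit 0 clear)
    let (mut x, mut n) = ([0.0f64; 2], [0.0f64; 2]);
    _mm_storeu_pd(x.as_mut_ptr(), mx);
    _mm_storeu_pd(n.as_mut_ptr(), mn);
    (x, n)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let (x, n) = unsafe { minmax_demo() };
        assert_eq!(x, [7.0, 111.0]);
        assert_eq!(n, [0.0, 111.0]);
    }
}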
@@ -32724,7 +32724,7 @@ pub unsafe fn _mm_maskz_min_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d {
/// Compute the square root of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sqrt_ss&expand=5387)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_ss&expand=5387)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtss))]
@@ -32740,7 +32740,7 @@ pub unsafe fn _mm_mask_sqrt_ss(src: __m128, k: __mmask8, a: __m128, b: __m128) -
/// Compute the square root of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sqrt_ss&expand=5388)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_ss&expand=5388)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtss))]
@@ -32756,7 +32756,7 @@ pub unsafe fn _mm_maskz_sqrt_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Compute the square root of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sqrt_sd&expand=5384)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_sd&expand=5384)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtsd))]
@@ -32772,7 +32772,7 @@ pub unsafe fn _mm_mask_sqrt_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128d
/// Compute the square root of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sqrt_sd&expand=5385)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_sd&expand=5385)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtsd))]
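The masked square root is the unary instance of the same pattern: the root is taken of b's low element, the merge obeys mask bit 0, and the upper lanes come from a. A sketch:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn masked_sqrt() -> [f64; 2] {
    let src = _mm_set1_pd(-7.0);
    let a = _mm_setr_pd(1.0, 99.0);
    let b = _mm_set1_pd(16.0);
    let r = _mm_mask_sqrt_sd(src, 0b1, a, b); // lane 0 = sqrt(16.0)
    let mut out = [0.0f64; 2];
    _mm_storeu_pd(out.as_mut_ptr(), r);
    out
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        assert_eq!(unsafe { masked_sqrt() }, [4.0, 99.0]); // lane 1 copied from a
    }
}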
@@ -32788,7 +32788,7 @@ pub unsafe fn _mm_maskz_sqrt_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d
/// Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_rsqrt14_ss&expand=4825)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rsqrt14_ss&expand=4825)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14ss))]
@@ -32803,7 +32803,7 @@ pub unsafe fn _mm_rsqrt14_ss(a: __m128, b: __m128) -> __m128 {
/// Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_rsqrt14_ss&expand=4823)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rsqrt14_ss&expand=4823)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14ss))]
@@ -32813,7 +32813,7 @@ pub unsafe fn _mm_mask_rsqrt14_ss(src: __m128, k: __mmask8, a: __m128, b: __m128
/// Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_rsqrt14_ss&expand=4824)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rsqrt14_ss&expand=4824)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14ss))]
@@ -32828,7 +32828,7 @@ pub unsafe fn _mm_maskz_rsqrt14_ss(k: __mmask8, a: __m128, b: __m128) -> __m128
/// Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_rsqrt14_sd&expand=4822)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rsqrt14_sd&expand=4822)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14sd))]
@@ -32843,7 +32843,7 @@ pub unsafe fn _mm_rsqrt14_sd(a: __m128d, b: __m128d) -> __m128d {
/// Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_rsqrt14_sd&expand=4820)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rsqrt14_sd&expand=4820)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14sd))]
@@ -32853,7 +32853,7 @@ pub unsafe fn _mm_mask_rsqrt14_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m1
/// Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_rsqrt14_sd&expand=4821)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rsqrt14_sd&expand=4821)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrsqrt14sd))]
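The rsqrt14 intrinsics trade accuracy for speed; the documented bound is a relative error below 2^-14. A sketch that checks the bound, with the a operand (which only supplies the upper lanes) set to zero:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn approx_rsqrt(x: f32) -> f32 {
    // The approximation is computed from b's low element.
    _mm_cvtss_f32(_mm_rsqrt14_ss(_mm_setzero_ps(), _mm_set_ss(x)))
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let x = 2.0f32;
        let approx = unsafe { approx_rsqrt(x) };
        let exact = 1.0 / x.sqrt();
        assert!(((approx - exact) / exact).abs() < 2f32.powi(-14));
    }
}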
@@ -32868,7 +32868,7 @@ pub unsafe fn _mm_maskz_rsqrt14_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m12
/// Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_rcp14_ss&expand=4508)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rcp14_ss&expand=4508)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14ss))]
@@ -32883,7 +32883,7 @@ pub unsafe fn _mm_rcp14_ss(a: __m128, b: __m128) -> __m128 {
/// Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_rcp14_ss&expand=4506)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rcp14_ss&expand=4506)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14ss))]
@@ -32893,7 +32893,7 @@ pub unsafe fn _mm_mask_rcp14_ss(src: __m128, k: __mmask8, a: __m128, b: __m128)
/// Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_rcp14_ss&expand=4507)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rcp14_ss&expand=4507)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14ss))]
@@ -32908,7 +32908,7 @@ pub unsafe fn _mm_maskz_rcp14_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_rcp14_sd&expand=4505)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_rcp14_sd&expand=4505)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14sd))]
@@ -32923,7 +32923,7 @@ pub unsafe fn _mm_rcp14_sd(a: __m128d, b: __m128d) -> __m128d {
/// Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_rcp14_sd&expand=4503)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_rcp14_sd&expand=4503)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14sd))]
@@ -32933,7 +32933,7 @@ pub unsafe fn _mm_mask_rcp14_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128
/// Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. The maximum relative error for this approximation is less than 2^-14.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_rcp14_sd&expand=4504)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_rcp14_sd&expand=4504)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrcp14sd))]
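rcp14 carries the same 2^-14 error bound, and a single Newton-Raphson step (r' = r * (2 - x * r)) roughly squares the relative error, a common way to recover near-full f32 precision from the fast estimate. A sketch:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn approx_recip(x: f32) -> f32 {
    _mm_cvtss_f32(_mm_rcp14_ss(_mm_setzero_ps(), _mm_set_ss(x)))
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let x = 3.0f32;
        let r = unsafe { approx_recip(x) };
        let exact = 1.0 / x;
        assert!(((r - exact) / exact).abs() < 2f32.powi(-14));
        let refined = r * (2.0 - x * r); // one Newton-Raphson step
        assert!(((refined - exact) / exact).abs() < 1e-6);
    }
}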
@@ -32948,7 +32948,7 @@ pub unsafe fn _mm_maskz_rcp14_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128d
/// Convert the exponent of the lower single-precision (32-bit) floating-point element in b to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getexp_ss&expand=2862)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_ss&expand=2862)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpss))]
@@ -32964,7 +32964,7 @@ pub unsafe fn _mm_getexp_ss(a: __m128, b: __m128) -> __m128 {
/// Convert the exponent of the lower single-precision (32-bit) floating-point element in b to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getexp_ss&expand=2863)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_ss&expand=2863)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpss))]
@@ -32980,7 +32980,7 @@ pub unsafe fn _mm_mask_getexp_ss(src: __m128, k: __mmask8, a: __m128, b: __m128)
/// Convert the exponent of the lower single-precision (32-bit) floating-point element in b to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getexp_ss&expand=2864)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_ss&expand=2864)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpss))]
@@ -32996,7 +32996,7 @@ pub unsafe fn _mm_maskz_getexp_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Convert the exponent of the lower double-precision (64-bit) floating-point element in b to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getexp_sd&expand=2859)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_sd&expand=2859)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpsd))]
@@ -33012,7 +33012,7 @@ pub unsafe fn _mm_getexp_sd(a: __m128d, b: __m128d) -> __m128d {
/// Convert the exponent of the lower double-precision (64-bit) floating-point element in b to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getexp_sd&expand=2860)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_sd&expand=2860)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpsd))]
@@ -33028,7 +33028,7 @@ pub unsafe fn _mm_mask_getexp_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m12
/// Convert the exponent of the lower double-precision (64-bit) floating-point element in b to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getexp_sd&expand=2861)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_sd&expand=2861)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpsd))]
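
For illustration, a minimal sketch of the getexp behavior documented above; it assumes an x86-64 CPU with AVX-512F and a toolchain where these intrinsics are usable (older toolchains gate them behind the nightly `stdarch_x86_avx512` feature):

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return; // no AVX-512F on this CPU
    }
    unsafe {
        let a = _mm_set1_ps(0.0);
        let b = _mm_set_ss(8.0);
        // Lane 0 of r holds floor(log2(8.0)) = 3.0; lanes 1..=3 are copied from a.
        let r = _mm_getexp_ss(a, b);
        assert_eq!(_mm_cvtss_f32(r), 3.0);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```
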
@@ -33054,7 +33054,7 @@ pub unsafe fn _mm_maskz_getexp_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getmant_ss&expand=2898)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_ss&expand=2898)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))]
@@ -33066,8 +33066,8 @@ pub unsafe fn _mm_getmant_ss<
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x4();
let b = b.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
@@ -33087,7 +33087,7 @@ pub unsafe fn _mm_getmant_ss<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getmant_ss&expand=2899)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_ss&expand=2899)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))]
@@ -33101,8 +33101,8 @@ pub unsafe fn _mm_mask_getmant_ss<
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x4();
let b = b.as_f32x4();
let src = src.as_f32x4();
@@ -33122,7 +33122,7 @@ pub unsafe fn _mm_mask_getmant_ss<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getmant_ss&expand=2900)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_ss&expand=2900)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0))]
@@ -33135,8 +33135,8 @@ pub unsafe fn _mm_maskz_getmant_ss<
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f32x4();
let b = b.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
@@ -33156,7 +33156,7 @@ pub unsafe fn _mm_maskz_getmant_ss<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getmant_sd&expand=2895)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_sd&expand=2895)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0))]
@@ -33168,8 +33168,8 @@ pub unsafe fn _mm_getmant_sd<
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x2();
let b = b.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
@@ -33189,7 +33189,7 @@ pub unsafe fn _mm_getmant_sd<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getmant_sd&expand=2896)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_sd&expand=2896)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0))]
@@ -33203,8 +33203,8 @@ pub unsafe fn _mm_mask_getmant_sd<
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x2();
let b = b.as_f64x2();
let src = src.as_f64x2();
@@ -33224,7 +33224,7 @@ pub unsafe fn _mm_mask_getmant_sd<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getmant_sd&expand=2897)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_sd&expand=2897)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0))]
@@ -33237,8 +33237,8 @@ pub unsafe fn _mm_maskz_getmant_sd<
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
let a = a.as_f64x2();
let b = b.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
@@ -33254,13 +33254,13 @@ pub unsafe fn _mm_maskz_getmant_sd<
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_roundscale_ss&expand=4802)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_ss&expand=4802)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 255))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_roundscale_ss<const IMM8: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
@@ -33276,7 +33276,7 @@ pub unsafe fn _mm_roundscale_ss<const IMM8: i32>(a: __m128, b: __m128) -> __m128
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_roundscale_ss&expand=4800)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_ss&expand=4800)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0))]
@@ -33287,7 +33287,7 @@ pub unsafe fn _mm_mask_roundscale_ss<const IMM8: i32>(
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let src = src.as_f32x4();
@@ -33303,7 +33303,7 @@ pub unsafe fn _mm_mask_roundscale_ss<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_roundscale_ss&expand=4801)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_ss&expand=4801)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0))]
@@ -33313,7 +33313,7 @@ pub unsafe fn _mm_maskz_roundscale_ss<const IMM8: i32>(
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let zero = _mm_setzero_ps().as_f32x4();
@@ -33329,13 +33329,13 @@ pub unsafe fn _mm_maskz_roundscale_ss<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_roundscale_sd&expand=4799)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_sd&expand=4799)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 255))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_roundscale_sd<const IMM8: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
@@ -33351,7 +33351,7 @@ pub unsafe fn _mm_roundscale_sd<const IMM8: i32>(a: __m128d, b: __m128d) -> __m1
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_roundscale_sd&expand=4797)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_sd&expand=4797)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0))]
@@ -33362,7 +33362,7 @@ pub unsafe fn _mm_mask_roundscale_sd<const IMM8: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let src = src.as_f64x2();
@@ -33378,7 +33378,7 @@ pub unsafe fn _mm_mask_roundscale_sd<const IMM8: i32>(
/// _MM_FROUND_TO_ZERO // truncate\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_roundscale_sd&expand=4798)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_sd&expand=4798)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0))]
@@ -33388,7 +33388,7 @@ pub unsafe fn _mm_maskz_roundscale_sd<const IMM8: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let zero = _mm_setzero_pd().as_f64x2();
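
A sketch of the roundscale IMM8 encoding, under the same platform assumptions; the upper four bits of IMM8 choose the scale and the low bits the rounding mode:

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        let a = _mm_set1_ps(0.0);
        let b = _mm_set_ss(1.3);
        // IMM8 bits [7:4] pick M (round to a multiple of 2^-M), the low bits
        // the rounding mode. 0x10 => M = 1, round to nearest: 1.3 -> 1.5.
        let r = _mm_roundscale_ss::<0x10>(a, b);
        assert_eq!(_mm_cvtss_f32(r), 1.5);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```
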
@@ -33398,7 +33398,7 @@ pub unsafe fn _mm_maskz_roundscale_sd<const IMM8: i32>(
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_scalef_ss&expand=4901)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_ss&expand=4901)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefss))]
@@ -33411,7 +33411,7 @@ pub unsafe fn _mm_scalef_ss(a: __m128, b: __m128) -> __m128 {
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_scalef_ss&expand=4899)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_ss&expand=4899)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefss))]
@@ -33424,7 +33424,7 @@ pub unsafe fn _mm_mask_scalef_ss(src: __m128, k: __mmask8, a: __m128, b: __m128)
/// Scale the packed single-precision (32-bit) floating-point elements in a using values from b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_scalef_ss&expand=4900)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_ss&expand=4900)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefss))]
@@ -33440,7 +33440,7 @@ pub unsafe fn _mm_maskz_scalef_ss(k: __mmask8, a: __m128, b: __m128) -> __m128 {
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_scalef_sd&expand=4898)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_sd&expand=4898)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefsd))]
@@ -33456,7 +33456,7 @@ pub unsafe fn _mm_scalef_sd(a: __m128d, b: __m128d) -> __m128d {
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_scalef_sd&expand=4896)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_sd&expand=4896)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefsd))]
@@ -33472,7 +33472,7 @@ pub unsafe fn _mm_mask_scalef_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m12
/// Scale the packed double-precision (64-bit) floating-point elements in a using values from b, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_scalef_sd&expand=4897)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_sd&expand=4897)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefsd))]
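
A sketch of the scalef semantics (`a * 2^floor(b)` in the lower lane), same assumptions:

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        let a = _mm_set_ss(3.0);
        let b = _mm_set_ss(4.0);
        // Lane 0 of r = a0 * 2^floor(b0) = 3.0 * 2^4 = 48.0; the upper lanes
        // are copied from a.
        let r = _mm_scalef_ss(a, b);
        assert_eq!(_mm_cvtss_f32(r), 48.0);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```
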
@@ -33488,7 +33488,7 @@ pub unsafe fn _mm_maskz_scalef_sd(k: __mmask8, a: __m128d, b: __m128d) -> __m128
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmadd_ss&expand=2582)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_ss&expand=2582)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss))]
@@ -33505,7 +33505,7 @@ pub unsafe fn _mm_mask_fmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) ->
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmadd_ss&expand=2584)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_ss&expand=2584)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss))]
@@ -33523,7 +33523,7 @@ pub unsafe fn _mm_maskz_fmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmadd_ss&expand=2583)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_ss&expand=2583)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss))]
@@ -33540,7 +33540,7 @@ pub unsafe fn _mm_mask3_fmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmadd_sd&expand=2578)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_sd&expand=2578)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd))]
@@ -33557,7 +33557,7 @@ pub unsafe fn _mm_mask_fmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d)
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmadd_sd&expand=2580)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_sd&expand=2580)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd))]
@@ -33575,7 +33575,7 @@ pub unsafe fn _mm_maskz_fmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmadd_sd&expand=2579)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_sd&expand=2579)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd))]
@@ -33592,7 +33592,7 @@ pub unsafe fn _mm_mask3_fmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmsub_ss&expand=2668)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_ss&expand=2668)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss))]
@@ -33610,7 +33610,7 @@ pub unsafe fn _mm_mask_fmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) ->
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmsub_ss&expand=2670)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_ss&expand=2670)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss))]
@@ -33629,7 +33629,7 @@ pub unsafe fn _mm_maskz_fmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128) -
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmsub_ss&expand=2669)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_ss&expand=2669)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss))]
@@ -33647,7 +33647,7 @@ pub unsafe fn _mm_mask3_fmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8) -
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmsub_sd&expand=2664)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_sd&expand=2664)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd))]
@@ -33665,7 +33665,7 @@ pub unsafe fn _mm_mask_fmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d)
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmsub_sd&expand=2666)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_sd&expand=2666)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd))]
@@ -33684,7 +33684,7 @@ pub unsafe fn _mm_maskz_fmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128d
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmsub_sd&expand=2665)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_sd&expand=2665)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd))]
@@ -33702,7 +33702,7 @@ pub unsafe fn _mm_mask3_fmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask8
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmadd_ss&expand=2748)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_ss&expand=2748)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss))]
@@ -33720,7 +33720,7 @@ pub unsafe fn _mm_mask_fnmadd_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmadd_ss&expand=2750)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_ss&expand=2750)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss))]
@@ -33739,7 +33739,7 @@ pub unsafe fn _mm_maskz_fnmadd_ss(k: __mmask8, a: __m128, b: __m128, c: __m128)
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmadd_ss&expand=2749)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_ss&expand=2749)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss))]
@@ -33757,7 +33757,7 @@ pub unsafe fn _mm_mask3_fnmadd_ss(a: __m128, b: __m128, c: __m128, k: __mmask8)
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmadd_sd&expand=2744)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_sd&expand=2744)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd))]
@@ -33775,7 +33775,7 @@ pub unsafe fn _mm_mask_fnmadd_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmadd_sd&expand=2746)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_sd&expand=2746)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd))]
@@ -33794,7 +33794,7 @@ pub unsafe fn _mm_maskz_fnmadd_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and add the negated intermediate result to the lower element in c. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmadd_sd&expand=2745)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_sd&expand=2745)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd))]
@@ -33812,7 +33812,7 @@ pub unsafe fn _mm_mask3_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmsub_ss&expand=2796)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_ss&expand=2796)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss))]
@@ -33831,7 +33831,7 @@ pub unsafe fn _mm_mask_fnmsub_ss(a: __m128, k: __mmask8, b: __m128, c: __m128) -
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmsub_ss&expand=2798)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_ss&expand=2798)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss))]
@@ -33851,7 +33851,7 @@ pub unsafe fn _mm_maskz_fnmsub_ss(k: __mmask8, a: __m128, b: __m128, c: __m128)
/// Multiply the lower single-precision (32-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper 3 packed elements from c to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmsub_ss&expand=2797)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_ss&expand=2797)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss))]
@@ -33870,7 +33870,7 @@ pub unsafe fn _mm_mask3_fnmsub_ss(a: __m128, b: __m128, c: __m128, k: __mmask8)
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmsub_sd&expand=2792)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_sd&expand=2792)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd))]
@@ -33889,7 +33889,7 @@ pub unsafe fn _mm_mask_fnmsub_sd(a: __m128d, k: __mmask8, b: __m128d, c: __m128d
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmsub_sd&expand=2794)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_sd&expand=2794)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd))]
@@ -33909,7 +33909,7 @@ pub unsafe fn _mm_maskz_fnmsub_sd(k: __mmask8, a: __m128d, b: __m128d, c: __m128
/// Multiply the lower double-precision (64-bit) floating-point elements in a and b, and subtract the lower element in c from the negated intermediate result. Store the result in the lower element of dst using writemask k (the element is copied from c when mask bit 0 is not set), and copy the upper element from c to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmsub_sd&expand=2793)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_sd&expand=2793)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd))]
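
A sketch of the writemask behavior shared by these scalar FMA variants, same assumptions; the mask literals are illustrative:

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        let a = _mm_set_ss(2.0);
        let b = _mm_set_ss(3.0);
        let c = _mm_set_ss(4.0);
        // Mask bit 0 set: lane 0 = a0 * b0 + c0 = 10.0.
        let r = _mm_mask_fmadd_ss(a, 0b1, b, c);
        assert_eq!(_mm_cvtss_f32(r), 10.0);
        // Mask bit 0 clear: lane 0 is copied from a instead.
        let r = _mm_mask_fmadd_ss(a, 0b0, b, c);
        assert_eq!(_mm_cvtss_f32(r), 2.0);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```
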
@@ -33935,7 +33935,7 @@ pub unsafe fn _mm_mask3_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d, k: __mmask
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_round_ss&expand=151)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_round_ss&expand=151)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))]
@@ -33958,7 +33958,7 @@ pub unsafe fn _mm_add_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_add_round_ss&expand=152)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_round_ss&expand=152)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))]
@@ -33986,7 +33986,7 @@ pub unsafe fn _mm_mask_add_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_round_ss&expand=153)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_round_ss&expand=153)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddss, ROUNDING = 8))]
@@ -34013,7 +34013,7 @@ pub unsafe fn _mm_maskz_add_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_round_sd&expand=148)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_round_sd&expand=148)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))]
@@ -34036,7 +34036,7 @@ pub unsafe fn _mm_add_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_add_round_Sd&expand=149)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_add_round_sd&expand=149)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))]
@@ -34064,7 +34064,7 @@ pub unsafe fn _mm_mask_add_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_round_sd&expand=150)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_add_round_sd&expand=150)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vaddsd, ROUNDING = 8))]
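
A sketch of how the ROUNDING constants listed above compose, same assumptions:

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    if !is_x86_feature_detected!("avx512f") {
        return;
    }
    unsafe {
        let a = _mm_set_ss(1.0);
        let b = _mm_set_ss(2.0);
        // ROUNDING must be one of the documented combinations; round to
        // nearest with exceptions suppressed is the common default.
        const R: i32 = _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC;
        let r = _mm_add_round_ss::<R>(a, b);
        assert_eq!(_mm_cvtss_f32(r), 3.0);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```
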
@@ -34091,7 +34091,7 @@ pub unsafe fn _mm_maskz_add_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sub_round_ss&expand=5745)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_round_ss&expand=5745)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))]
@@ -34114,7 +34114,7 @@ pub unsafe fn _mm_sub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sub_round_ss&expand=5743)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_round_ss&expand=5743)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))]
@@ -34142,7 +34142,7 @@ pub unsafe fn _mm_mask_sub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sub_round_ss&expand=5744)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_round_ss&expand=5744)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubss, ROUNDING = 8))]
@@ -34169,7 +34169,7 @@ pub unsafe fn _mm_maskz_sub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sub_round_sd&expand=5742)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_round_sd&expand=5742)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))]
@@ -34192,7 +34192,7 @@ pub unsafe fn _mm_sub_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sub_round_sd&expand=5740)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sub_round_sd&expand=5740)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))]
@@ -34220,7 +34220,7 @@ pub unsafe fn _mm_mask_sub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sub_round_sd&expand=5741)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sub_round_sd&expand=5741)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsubsd, ROUNDING = 8))]
@@ -34247,7 +34247,7 @@ pub unsafe fn _mm_maskz_sub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mul_round_ss&expand=3946)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_round_ss&expand=3946)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))]
@@ -34270,7 +34270,7 @@ pub unsafe fn _mm_mul_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_mul_round_ss&expand=3944)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_round_ss&expand=3944)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))]
@@ -34298,7 +34298,7 @@ pub unsafe fn _mm_mask_mul_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_mul_round_ss&expand=3945)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_round_ss&expand=3945)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulss, ROUNDING = 8))]
@@ -34325,7 +34325,7 @@ pub unsafe fn _mm_maskz_mul_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mul_round_sd&expand=3943)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_round_sd&expand=3943)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))]
@@ -34348,7 +34348,7 @@ pub unsafe fn _mm_mul_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_mul_round_sd&expand=3941)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_mul_round_sd&expand=3941)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))]
@@ -34376,7 +34376,7 @@ pub unsafe fn _mm_mask_mul_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_mul_round_sd&expand=3942)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_mul_round_sd&expand=3942)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmulsd, ROUNDING = 8))]
@@ -34403,7 +34403,7 @@ pub unsafe fn _mm_maskz_mul_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_div_round_ss&expand=2174)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_div_round_ss&expand=2174)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))]
@@ -34426,7 +34426,7 @@ pub unsafe fn _mm_div_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_div_round_ss&expand=2175)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_round_ss&expand=2175)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))]
@@ -34454,7 +34454,7 @@ pub unsafe fn _mm_mask_div_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_div_round_ss&expand=2176)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_round_ss&expand=2176)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivss, ROUNDING = 8))]
@@ -34481,7 +34481,7 @@ pub unsafe fn _mm_maskz_div_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_div_round_sd&expand=2171)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_div_round_sd&expand=2171)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))]
@@ -34504,7 +34504,7 @@ pub unsafe fn _mm_div_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_div_round_sd&expand=2172)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_div_round_sd&expand=2172)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))]
@@ -34532,7 +34532,7 @@ pub unsafe fn _mm_mask_div_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_div_round_sd&expand=2173)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_div_round_sd&expand=2173)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vdivsd, ROUNDING = 8))]
@@ -34553,7 +34553,7 @@ pub unsafe fn _mm_maskz_div_round_sd<const ROUNDING: i32>(
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the maximum value in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_max_round_ss&expand=3668)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_max_round_ss&expand=3668)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxss, SAE = 8))]
@@ -34570,7 +34570,7 @@ pub unsafe fn _mm_max_round_ss<const SAE: i32>(a: __m128, b: __m128) -> __m128 {
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_max_ss&expand=3672)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_round_ss&expand=3672)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxss, SAE = 8))]
@@ -34592,7 +34592,7 @@ pub unsafe fn _mm_mask_max_round_ss<const SAE: i32>(
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_max_round_ss&expand=3667)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_round_ss&expand=3667)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxss, SAE = 8))]
@@ -34609,7 +34609,7 @@ pub unsafe fn _mm_maskz_max_round_ss<const SAE: i32>(k: __mmask8, a: __m128, b:
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the maximum value in the lower element of dst, and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_max_round_sd&expand=3665)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_max_round_sd&expand=3665)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))]
@@ -34626,7 +34626,7 @@ pub unsafe fn _mm_max_round_sd<const SAE: i32>(a: __m128d, b: __m128d) -> __m128
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_max_round_sd&expand=3663)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_max_round_sd&expand=3663)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))]
@@ -34648,7 +34648,7 @@ pub unsafe fn _mm_mask_max_round_sd<const SAE: i32>(
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the maximum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_max_sd&expand=3670)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_max_round_sd&expand=3670)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vmaxsd, SAE = 8))]
@@ -34669,7 +34669,7 @@ pub unsafe fn _mm_maskz_max_round_sd<const SAE: i32>(
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the minimum value in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_min_round_ss&expand=3782)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_min_round_ss&expand=3782)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminss, SAE = 8))]
@@ -34686,7 +34686,7 @@ pub unsafe fn _mm_min_round_ss<const SAE: i32>(a: __m128, b: __m128) -> __m128 {
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_min_round_Ss&expand=3780)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_round_ss&expand=3780)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminss, SAE = 8))]
@@ -34708,7 +34708,7 @@ pub unsafe fn _mm_mask_min_round_ss<const SAE: i32>(
/// Compare the lower single-precision (32-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_min_round_ss&expand=3781)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_round_ss&expand=3781)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminss, SAE = 8))]
@@ -34725,7 +34725,7 @@ pub unsafe fn _mm_maskz_min_round_ss<const SAE: i32>(k: __mmask8, a: __m128, b:
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the minimum value in the lower element of dst, and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_min_round_sd&expand=3779)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_min_round_sd&expand=3779)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminsd, SAE = 8))]
@@ -34742,7 +34742,7 @@ pub unsafe fn _mm_min_round_sd<const SAE: i32>(a: __m128d, b: __m128d) -> __m128
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_min_round_sd&expand=3777)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_min_round_sd&expand=3777)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminsd, SAE = 8))]
@@ -34764,7 +34764,7 @@ pub unsafe fn _mm_mask_min_round_sd<const SAE: i32>(
/// Compare the lower double-precision (64-bit) floating-point elements in a and b, store the minimum value in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_min_round_Sd&expand=3778)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_min_round_Sd&expand=3778)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vminsd, SAE = 8))]
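
Aside, for orientation (not part of the patch itself): on all of these scalar min/max intrinsics the SAE const parameter only controls whether floating-point exceptions are suppressed. A minimal usage sketch, assuming a nightly toolchain that exposes the unstable AVX512F intrinsics and an avx512f-capable CPU; the helper name is hypothetical:

use core::arch::x86_64::*;

// Hypothetical helper, shown only to illustrate how SAE is passed.
#[target_feature(enable = "avx512f")]
unsafe fn quiet_min_sd(a: __m128d, b: __m128d) -> __m128d {
    // SAE may only be _MM_FROUND_CUR_DIRECTION (normal exception behavior)
    // or _MM_FROUND_NO_EXC (suppress exceptions); other values are rejected
    // at compile time by the static assertion inside the intrinsic.
    _mm_min_round_sd::<{ _MM_FROUND_NO_EXC }>(a, b)
}
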
@@ -34791,7 +34791,7 @@ pub unsafe fn _mm_maskz_min_round_sd<const SAE: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sqrt_round_ss&expand=5383)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sqrt_round_ss&expand=5383)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))]
@@ -34814,7 +34814,7 @@ pub unsafe fn _mm_sqrt_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sqrt_round_ss&expand=5381)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_round_ss&expand=5381)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))]
@@ -34842,7 +34842,7 @@ pub unsafe fn _mm_mask_sqrt_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sqrt_round_ss&expand=5382)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_round_ss&expand=5382)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtss, ROUNDING = 8))]
@@ -34869,7 +34869,7 @@ pub unsafe fn _mm_maskz_sqrt_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sqrt_round_sd&expand=5380)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sqrt_round_sd&expand=5380)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))]
@@ -34892,7 +34892,7 @@ pub unsafe fn _mm_sqrt_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_sqrt_round_sd&expand=5378)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_sqrt_round_sd&expand=5378)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))]
@@ -34920,7 +34920,7 @@ pub unsafe fn _mm_mask_sqrt_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_sqrt_round_sd&expand=5379)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_sqrt_round_sd&expand=5379)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vsqrtsd, ROUNDING = 8))]
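
Aside (not part of the patch): unlike the SAE-only intrinsics above, the sqrt family takes a full ROUNDING parameter, which must be one of the combinations listed in the doc comment. A hedged sketch under the same nightly/avx512f assumptions, with a hypothetical helper name:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn sqrt_ss_toward_zero(a: __m128, b: __m128) -> __m128 {
    // Truncating square root of the low lane of b; upper lanes come from a.
    _mm_sqrt_round_ss::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b)
}
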
@@ -34941,7 +34941,7 @@ pub unsafe fn _mm_maskz_sqrt_round_sd<const ROUNDING: i32>(
/// Convert the exponent of the lower single-precision (32-bit) floating-point element in b to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getexp_round_ss&expand=2856)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_round_ss&expand=2856)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))]
@@ -34958,7 +34958,7 @@ pub unsafe fn _mm_getexp_round_ss<const SAE: i32>(a: __m128, b: __m128) -> __m12
/// Convert the exponent of the lower single-precision (32-bit) floating-point element in b to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getexp_round_ss&expand=2857)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_round_ss&expand=2857)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))]
@@ -34980,7 +34980,7 @@ pub unsafe fn _mm_mask_getexp_round_ss<const SAE: i32>(
/// Convert the exponent of the lower single-precision (32-bit) floating-point element in b to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getexp_round_ss&expand=2858)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_round_ss&expand=2858)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpss, SAE = 8))]
@@ -35001,7 +35001,7 @@ pub unsafe fn _mm_maskz_getexp_round_ss<const SAE: i32>(
/// Convert the exponent of the lower double-precision (64-bit) floating-point element in b to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getexp_round_sd&expand=2853)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getexp_round_sd&expand=2853)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))]
@@ -35018,7 +35018,7 @@ pub unsafe fn _mm_getexp_round_sd<const SAE: i32>(a: __m128d, b: __m128d) -> __m
/// Convert the exponent of the lower double-precision (64-bit) floating-point element in b to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getexp_round_sd&expand=2854)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getexp_round_sd&expand=2854)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))]
@@ -35040,7 +35040,7 @@ pub unsafe fn _mm_mask_getexp_round_sd<const SAE: i32>(
/// Convert the exponent of the lower double-precision (64-bit) floating-point element in b to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. This intrinsic essentially calculates floor(log2(x)) for the lower element.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getexp_round_sd&expand=2855)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getexp_round_sd&expand=2855)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetexpsd, SAE = 8))]
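
Aside (not part of the patch): getexp returns floor(log2(|x|)) of the low lane as a float, i.e. the unbiased exponent. A worked sketch under the same nightly/avx512f assumptions; the helper name is hypothetical:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn exponent_of_low_lane() -> f64 {
    let a = _mm_set_sd(0.0);
    let b = _mm_set_sd(8.0); // 8.0 = 1.0 * 2^3
    let r = _mm_getexp_round_sd::<{ _MM_FROUND_NO_EXC }>(a, b);
    _mm_cvtsd_f64(r) // 3.0
}
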
@@ -35070,7 +35070,7 @@ pub unsafe fn _mm_maskz_getexp_round_sd<const SAE: i32>(
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getmant_round_ss&expand=2892)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_round_ss&expand=2892)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))]
@@ -35083,8 +35083,8 @@ pub unsafe fn _mm_getmant_round_ss<
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
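
Aside (not part of the patch): besides the link updates, the substantive change in these hunks is replacing the width-specific assertion macros (static_assert_imm2!, static_assert_imm4!, static_assert_imm8!) with the single parameterized static_assert_uimm_bits!(X, N). A simplified, hypothetical stand-in (not stdarch's real definition) showing what such a check enforces:

// Rejects, at compile time, immediates that do not fit in N unsigned bits.
macro_rules! demo_assert_uimm_bits {
    ($imm:expr, $bits:expr) => {
        const _: () = assert!($imm >= 0 && ($imm as u64) < (1u64 << $bits));
    };
}

demo_assert_uimm_bits!(0b1010, 4); // ok: 10 fits in 4 unsigned bits
// demo_assert_uimm_bits!(16, 4);  // would fail to compile: 16 needs 5 bits
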
@@ -35105,7 +35105,7 @@ pub unsafe fn _mm_getmant_round_ss<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getmant_round_ss&expand=2893)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_round_ss&expand=2893)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))]
@@ -35120,8 +35120,8 @@ pub unsafe fn _mm_mask_getmant_round_ss<
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -35142,7 +35142,7 @@ pub unsafe fn _mm_mask_getmant_round_ss<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getmant_round_ss&expand=2894)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_round_ss&expand=2894)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantss, NORM = 0, SIGN = 0, SAE = 4))]
@@ -35156,8 +35156,8 @@ pub unsafe fn _mm_maskz_getmant_round_ss<
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -35178,7 +35178,7 @@ pub unsafe fn _mm_maskz_getmant_round_ss<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_getmant_round_sd&expand=2889)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_getmant_round_sd&expand=2889)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))]
@@ -35191,8 +35191,8 @@ pub unsafe fn _mm_getmant_round_sd<
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -35213,7 +35213,7 @@ pub unsafe fn _mm_getmant_round_sd<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_getmant_round_sd&expand=2890)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_getmant_round_sd&expand=2890)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))]
@@ -35228,8 +35228,8 @@ pub unsafe fn _mm_mask_getmant_round_sd<
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -35250,7 +35250,7 @@ pub unsafe fn _mm_mask_getmant_round_sd<
/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_getmant_round_sd&expand=2891)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_getmant_round_sd&expand=2891)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vgetmantsd, NORM = 0, SIGN = 0, SAE = 4))]
@@ -35264,8 +35264,8 @@ pub unsafe fn _mm_maskz_getmant_round_sd<
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm4!(NORM);
- static_assert_imm2!(SIGN);
+ static_assert_uimm_bits!(NORM, 4);
+ static_assert_uimm_bits!(SIGN, 2);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
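
Aside (not part of the patch): getmant splits off the mantissa, with NORM (a 4-bit immediate) choosing the normalization interval and SIGN (a 2-bit immediate) the sign handling, which is exactly what the new static_assert_uimm_bits! calls enforce. A worked sketch under the same nightly/avx512f assumptions; the helper name is hypothetical:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn mantissa_of_low_lane() -> f64 {
    let a = _mm_set_sd(0.0);
    let b = _mm_set_sd(12.0); // 12.0 = 1.5 * 2^3
    let r = _mm_getmant_round_sd::<
        { _MM_MANT_NORM_1_2 },       // normalize the mantissa into [1, 2)
        { _MM_MANT_SIGN_src },       // keep the source's sign
        { _MM_FROUND_CUR_DIRECTION },
    >(a, b);
    _mm_cvtsd_f64(r) // 1.5
}
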
@@ -35283,7 +35283,7 @@ pub unsafe fn _mm_maskz_getmant_round_sd<
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_roundscale_round_ss&expand=4796)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_round_ss&expand=4796)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))]
@@ -35292,7 +35292,7 @@ pub unsafe fn _mm_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -35310,7 +35310,7 @@ pub unsafe fn _mm_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_roundscale_round_ss&expand=4794)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_round_ss&expand=4794)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))]
@@ -35321,7 +35321,7 @@ pub unsafe fn _mm_mask_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -35339,7 +35339,7 @@ pub unsafe fn _mm_mask_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_roundscale_round_ss&expand=4795)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_round_ss&expand=4795)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscaless, IMM8 = 0, SAE = 8))]
@@ -35349,7 +35349,7 @@ pub unsafe fn _mm_maskz_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
a: __m128,
b: __m128,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -35367,7 +35367,7 @@ pub unsafe fn _mm_maskz_roundscale_round_ss<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_roundscale_round_sd&expand=4793)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_roundscale_round_sd&expand=4793)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))]
@@ -35376,7 +35376,7 @@ pub unsafe fn _mm_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -35394,7 +35394,7 @@ pub unsafe fn _mm_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_roundscale_round_sd&expand=4791)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_roundscale_round_sd&expand=4791)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))]
@@ -35405,7 +35405,7 @@ pub unsafe fn _mm_mask_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -35423,7 +35423,7 @@ pub unsafe fn _mm_mask_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE\
///
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_roundscale_round_sd&expand=4792)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_roundscale_round_sd&expand=4792)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vrndscalesd, IMM8 = 0, SAE = 8))]
@@ -35433,7 +35433,7 @@ pub unsafe fn _mm_maskz_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
a: __m128d,
b: __m128d,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
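
Aside (not part of the patch): for roundscale, imm8[7:4] gives the number of fraction bits M to keep and the low bits select the rounding mode, so the result is roughly 2^-M * round(x * 2^M); IMM8 = 0 therefore rounds the low lane to the nearest integer. Sketch under the same nightly/avx512f assumptions; the helper name is hypothetical:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn round_low_lane_to_integer(a: __m128d, b: __m128d) -> __m128d {
    _mm_roundscale_round_sd::<0, { _MM_FROUND_CUR_DIRECTION }>(a, b)
}
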
@@ -35451,7 +35451,7 @@ pub unsafe fn _mm_maskz_roundscale_round_sd<const IMM8: i32, const SAE: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_scalef_round_ss&expand=4895)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_round_ss&expand=4895)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))]
@@ -35474,7 +35474,7 @@ pub unsafe fn _mm_scalef_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_scalef_round_ss&expand=4893)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_round_ss&expand=4893)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))]
@@ -35502,7 +35502,7 @@ pub unsafe fn _mm_mask_scalef_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_scalef_round_ss&expand=4894)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_round_ss&expand=4894)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefss, ROUNDING = 8))]
@@ -35529,7 +35529,7 @@ pub unsafe fn _mm_maskz_scalef_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_scalef_round_sd&expand=4892)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_scalef_round_sd&expand=4892)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))]
@@ -35552,7 +35552,7 @@ pub unsafe fn _mm_scalef_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_scalef_round_sd&expand=4890)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_scalef_round_sd&expand=4890)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))]
@@ -35579,7 +35579,7 @@ pub unsafe fn _mm_mask_scalef_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_scalef_round_sd&expand=4891)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_scalef_round_sd&expand=4891)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vscalefsd, ROUNDING = 8))]
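
Aside (not part of the patch): scalef computes a * 2^floor(b) on the low lane, which makes it a branch-free ldexp. Worked sketch under the same nightly/avx512f assumptions; the helper name is hypothetical:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn ldexp_low_lane() -> f64 {
    let a = _mm_set_sd(1.5);
    let b = _mm_set_sd(3.0);
    let r = _mm_scalef_round_sd::<{ _MM_FROUND_CUR_DIRECTION }>(a, b);
    _mm_cvtsd_f64(r) // 1.5 * 2^3 = 12.0
}
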
@@ -35606,7 +35606,7 @@ pub unsafe fn _mm_maskz_scalef_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fmadd_round_ss&expand=2573)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmadd_round_ss&expand=2573)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))]
@@ -35630,7 +35630,7 @@ pub unsafe fn _mm_fmadd_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmadd_round_ss&expand=2574)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_round_ss&expand=2574)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))]
@@ -35661,7 +35661,7 @@ pub unsafe fn _mm_mask_fmadd_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmadd_round_ss&expand=2576)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_round_ss&expand=2576)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))]
@@ -35693,7 +35693,7 @@ pub unsafe fn _mm_maskz_fmadd_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmadd_round_ss&expand=2575)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_round_ss&expand=2575)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213ss, ROUNDING = 8))]
@@ -35724,7 +35724,7 @@ pub unsafe fn _mm_mask3_fmadd_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fmadd_round_sd&expand=2569)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmadd_round_sd&expand=2569)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))]
@@ -35752,7 +35752,7 @@ pub unsafe fn _mm_fmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmadd_round_sd&expand=2570)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmadd_round_sd&expand=2570)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))]
@@ -35783,7 +35783,7 @@ pub unsafe fn _mm_mask_fmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmadd_round_sd&expand=2572)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmadd_round_sd&expand=2572)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))]
@@ -35815,7 +35815,7 @@ pub unsafe fn _mm_maskz_fmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmadd_round_Sd&expand=2571)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmadd_round_Sd&expand=2571)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmadd213sd, ROUNDING = 8))]
@@ -35846,7 +35846,7 @@ pub unsafe fn _mm_mask3_fmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fmsub_round_ss&expand=2659)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmsub_round_ss&expand=2659)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))]
@@ -35871,7 +35871,7 @@ pub unsafe fn _mm_fmsub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c: _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmsub_round_ss&expand=2660)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_round_ss&expand=2660)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))]
@@ -35903,7 +35903,7 @@ pub unsafe fn _mm_mask_fmsub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmsub_round_ss&expand=2662)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_round_ss&expand=2662)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))]
@@ -35936,7 +35936,7 @@ pub unsafe fn _mm_maskz_fmsub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmsub_round_ss&expand=2661)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_round_ss&expand=2661)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213ss, ROUNDING = 8))]
@@ -35968,7 +35968,7 @@ pub unsafe fn _mm_mask3_fmsub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fmsub_round_sd&expand=2655)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fmsub_round_sd&expand=2655)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))]
@@ -35997,7 +35997,7 @@ pub unsafe fn _mm_fmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fmsub_round_sd&expand=2656)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fmsub_round_sd&expand=2656)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))]
@@ -36029,7 +36029,7 @@ pub unsafe fn _mm_mask_fmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fmsub_round_sd&expand=2658)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fmsub_round_sd&expand=2658)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))]
@@ -36062,7 +36062,7 @@ pub unsafe fn _mm_maskz_fmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fmsub_round_sd&expand=2657)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fmsub_round_sd&expand=2657)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfmsub213sd, ROUNDING = 8))]
@@ -36094,7 +36094,7 @@ pub unsafe fn _mm_mask3_fmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fnmadd_round_ss&expand=2739)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmadd_round_ss&expand=2739)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))]
@@ -36119,7 +36119,7 @@ pub unsafe fn _mm_fnmadd_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c:
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmadd_round_ss&expand=2740)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_round_ss&expand=2740)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))]
@@ -36151,7 +36151,7 @@ pub unsafe fn _mm_mask_fnmadd_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmadd_round_ss&expand=2742)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_round_ss&expand=2742)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))]
@@ -36184,7 +36184,7 @@ pub unsafe fn _mm_maskz_fnmadd_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmadd_round_ss&expand=2741)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_round_ss&expand=2741)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213ss, ROUNDING = 8))]
@@ -36216,7 +36216,7 @@ pub unsafe fn _mm_mask3_fnmadd_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fnmadd_round_sd&expand=2735)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmadd_round_sd&expand=2735)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))]
@@ -36245,7 +36245,7 @@ pub unsafe fn _mm_fnmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmadd_round_sd&expand=2736)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmadd_round_sd&expand=2736)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))]
@@ -36277,7 +36277,7 @@ pub unsafe fn _mm_mask_fnmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmadd_round_sd&expand=2738)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmadd_round_sd&expand=2738)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))]
@@ -36310,7 +36310,7 @@ pub unsafe fn _mm_maskz_fnmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmadd_round_Sd&expand=2737)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmadd_round_Sd&expand=2737)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmadd213sd, ROUNDING = 8))]
@@ -36342,7 +36342,7 @@ pub unsafe fn _mm_mask3_fnmadd_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fnmsub_round_ss&expand=2787)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmsub_round_ss&expand=2787)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))]
@@ -36368,7 +36368,7 @@ pub unsafe fn _mm_fnmsub_round_ss<const ROUNDING: i32>(a: __m128, b: __m128, c:
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmsub_round_ss&expand=2788)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_round_ss&expand=2788)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))]
@@ -36401,7 +36401,7 @@ pub unsafe fn _mm_mask_fnmsub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmsub_round_ss&expand=2790)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_round_ss&expand=2790)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))]
@@ -36435,7 +36435,7 @@ pub unsafe fn _mm_maskz_fnmsub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmsub_round_ss&expand=2789)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_round_ss&expand=2789)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213ss, ROUNDING = 8))]
@@ -36468,7 +36468,7 @@ pub unsafe fn _mm_mask3_fnmsub_round_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fnmsub_round_sd&expand=2783)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fnmsub_round_sd&expand=2783)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))]
@@ -36498,7 +36498,7 @@ pub unsafe fn _mm_fnmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fnmsub_round_sd&expand=2784)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fnmsub_round_sd&expand=2784)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))]
@@ -36531,7 +36531,7 @@ pub unsafe fn _mm_mask_fnmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fnmsub_round_sd&expand=2786)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fnmsub_round_sd&expand=2786)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))]
@@ -36565,7 +36565,7 @@ pub unsafe fn _mm_maskz_fnmsub_round_sd<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask3_fnmsub_round_sd&expand=2785)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask3_fnmsub_round_sd&expand=2785)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfnmsub213sd, ROUNDING = 8))]
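
Aside (not part of the patch): the four fused families in the hunks above differ only in the signs applied before the single final rounding: fmadd = a*b + c, fmsub = a*b - c, fnmadd = -(a*b) + c, fnmsub = -(a*b) - c. Sketch under the same nightly/avx512f assumptions; the helper name is hypothetical:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn fused_low_lane(a: __m128, b: __m128, c: __m128) -> __m128 {
    // One rounding step for the whole a*b + c, here truncating.
    _mm_fmadd_round_ss::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b, c)
}
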
@@ -36591,13 +36591,13 @@ pub unsafe fn _mm_mask3_fnmsub_round_sd<const ROUNDING: i32>(
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fixupimm_ss&expand=2517)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_ss&expand=2517)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_fixupimm_ss<const IMM8: i32>(a: __m128, b: __m128, c: __m128i) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let c = c.as_i32x4();
@@ -36609,7 +36609,7 @@ pub unsafe fn _mm_fixupimm_ss<const IMM8: i32>(a: __m128, b: __m128, c: __m128i)
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fixupimm_ss&expand=2518)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_ss&expand=2518)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))]
@@ -36620,7 +36620,7 @@ pub unsafe fn _mm_mask_fixupimm_ss<const IMM8: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let c = c.as_i32x4();
@@ -36632,7 +36632,7 @@ pub unsafe fn _mm_mask_fixupimm_ss<const IMM8: i32>(
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fixupimm_ss&expand=2519)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_ss&expand=2519)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0))]
@@ -36643,7 +36643,7 @@ pub unsafe fn _mm_maskz_fixupimm_ss<const IMM8: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f32x4();
let b = b.as_f32x4();
let c = c.as_i32x4();
@@ -36655,13 +36655,13 @@ pub unsafe fn _mm_maskz_fixupimm_ss<const IMM8: i32>(
/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fixupimm_sd&expand=2514)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_sd&expand=2514)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))]
#[rustc_legacy_const_generics(3)]
pub unsafe fn _mm_fixupimm_sd<const IMM8: i32>(a: __m128d, b: __m128d, c: __m128i) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let c = c.as_i64x2();
@@ -36673,7 +36673,7 @@ pub unsafe fn _mm_fixupimm_sd<const IMM8: i32>(a: __m128d, b: __m128d, c: __m128
/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fixupimm_sd&expand=2515)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_sd&expand=2515)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))]
@@ -36684,7 +36684,7 @@ pub unsafe fn _mm_mask_fixupimm_sd<const IMM8: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let c = c.as_i64x2();
@@ -36696,7 +36696,7 @@ pub unsafe fn _mm_mask_fixupimm_sd<const IMM8: i32>(
/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fixupimm_sd&expand=2516)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_sd&expand=2516)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0))]
@@ -36707,7 +36707,7 @@ pub unsafe fn _mm_maskz_fixupimm_sd<const IMM8: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_f64x2();
let b = b.as_f64x2();
let c = c.as_i64x2();
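
Aside (not part of the patch): for fixupimm, c carries an eight-entry lookup table (4 bits per entry) indexed by the classification of the input (QNaN, zero, one, and so on), and IMM8 selects which exception flags may be raised. The table value below is illustrative only. Sketch under the same nightly/avx512f assumptions; the helper name is hypothetical:

use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn fixup_low_lane(a: __m128d, b: __m128d) -> __m128d {
    // All-zero table: every classification selects token response 0,
    // which leaves the destination element unchanged.
    let table = _mm_set1_epi64x(0);
    _mm_fixupimm_sd::<0>(a, b, table)
}
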
@@ -36720,7 +36720,7 @@ pub unsafe fn _mm_maskz_fixupimm_sd<const IMM8: i32>(
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fixupimm_round_ss&expand=2511)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_round_ss&expand=2511)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))]
@@ -36730,7 +36730,7 @@ pub unsafe fn _mm_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -36744,7 +36744,7 @@ pub unsafe fn _mm_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fixupimm_round_ss&expand=2512)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_round_ss&expand=2512)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))]
@@ -36755,7 +36755,7 @@ pub unsafe fn _mm_mask_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -36769,7 +36769,7 @@ pub unsafe fn _mm_mask_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fixupimm_round_ss&expand=2513)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_round_ss&expand=2513)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmss, IMM8 = 0, SAE = 8))]
@@ -36780,7 +36780,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
b: __m128,
c: __m128i,
) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
@@ -36794,7 +36794,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_ss<const IMM8: i32, const SAE: i32>(
/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_fixupimm_round_sd&expand=2508)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_round_sd&expand=2508)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))]
@@ -36804,7 +36804,7 @@ pub unsafe fn _mm_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -36818,7 +36818,7 @@ pub unsafe fn _mm_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_fixupimm_round_sd&expand=2509)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_round_sd&expand=2509)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))]
@@ -36829,7 +36829,7 @@ pub unsafe fn _mm_mask_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
@@ -36843,7 +36843,7 @@ pub unsafe fn _mm_mask_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_fixupimm_round_sd&expand=2510)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_round_sd&expand=2510)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vfixupimmsd, IMM8 = 0, SAE = 8))]
@@ -36854,7 +36854,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
b: __m128d,
c: __m128i,
) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
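
Hedged usage note for the fixupimm family above: these intrinsics require the avx512f target feature, so callers must compile with it enabled or check for it at runtime first (and, depending on the toolchain, the AVX-512 intrinsics may additionally be nightly-gated). A minimal detection sketch:

    #[cfg(target_arch = "x86_64")]
    fn have_avx512f() -> bool {
        std::is_x86_feature_detected!("avx512f")
    }

    #[cfg(not(target_arch = "x86_64"))]
    fn have_avx512f() -> bool {
        false
    }

    fn main() {
        // Only call the avx512f intrinsics when this returns true.
        println!("avx512f available: {}", have_avx512f());
    }
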
@@ -36867,7 +36867,7 @@ pub unsafe fn _mm_maskz_fixupimm_round_sd<const IMM8: i32, const SAE: i32>(
/// Convert the lower single-precision (32-bit) floating-point element in b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_cvtss_sd&expand=1896)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvtss_sd&expand=1896)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2sd))]
@@ -36883,7 +36883,7 @@ pub unsafe fn _mm_mask_cvtss_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128
/// Convert the lower single-precision (32-bit) floating-point element in b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_cvtss_sd&expand=1897)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvtss_sd&expand=1897)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2sd))]
@@ -36899,7 +36899,7 @@ pub unsafe fn _mm_maskz_cvtss_sd(k: __mmask8, a: __m128d, b: __m128) -> __m128d
/// Convert the lower double-precision (64-bit) floating-point element in b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_cvtsd_ss&expand=1797)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvtsd_ss&expand=1797)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2ss))]
@@ -36915,7 +36915,7 @@ pub unsafe fn _mm_mask_cvtsd_ss(src: __m128, k: __mmask8, a: __m128, b: __m128d)
/// Convert the lower double-precision (64-bit) floating-point element in b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_cvtsd_ss&expand=1798)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvtsd_ss&expand=1798)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2ss))]
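
The mask/maskz pairs above differ only in what happens when mask bit 0 is clear: the writemask form copies the element from src, the zeromask form zeroes it. A scalar model of that behavior (an illustration, not the intrinsics themselves):

    fn mask_cvtss_sd_model(src: f64, k: u8, b: f32) -> f64 {
        // writemask: keep src when mask bit 0 is not set
        if k & 1 != 0 { f64::from(b) } else { src }
    }

    fn maskz_cvtss_sd_model(k: u8, b: f32) -> f64 {
        // zeromask: zero the element when mask bit 0 is not set
        if k & 1 != 0 { f64::from(b) } else { 0.0 }
    }

    fn main() {
        assert_eq!(mask_cvtss_sd_model(9.0, 0b0, 1.5), 9.0);
        assert_eq!(maskz_cvtss_sd_model(0b0, 1.5), 0.0);
        assert_eq!(maskz_cvtss_sd_model(0b1, 1.5), 1.5);
    }
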
@@ -36932,7 +36932,7 @@ pub unsafe fn _mm_maskz_cvtsd_ss(k: __mmask8, a: __m128, b: __m128d) -> __m128 {
/// Convert the lower single-precision (32-bit) floating-point element in b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundss_sd&expand=1371)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_sd&expand=1371)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))]
@@ -36949,7 +36949,7 @@ pub unsafe fn _mm_cvt_roundss_sd<const SAE: i32>(a: __m128d, b: __m128) -> __m12
/// Convert the lower single-precision (32-bit) floating-point element in b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_cvt_roundss_sd&expand=1372)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvt_roundss_sd&expand=1372)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))]
@@ -36971,7 +36971,7 @@ pub unsafe fn _mm_mask_cvt_roundss_sd<const SAE: i32>(
/// Convert the lower single-precision (32-bit) floating-point element in b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_cvt_roundss_sd&expand=1373)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvt_roundss_sd&expand=1373)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2sd, SAE = 8))]
@@ -36997,7 +36997,7 @@ pub unsafe fn _mm_maskz_cvt_roundss_sd<const SAE: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundsd_ss&expand=1361)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_ss&expand=1361)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))]
@@ -37019,7 +37019,7 @@ pub unsafe fn _mm_cvt_roundsd_ss<const ROUNDING: i32>(a: __m128, b: __m128d) ->
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mask_cvt_roundsd_ss&expand=1362)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_cvt_roundsd_ss&expand=1362)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))]
@@ -37046,7 +37046,7 @@ pub unsafe fn _mm_mask_cvt_roundsd_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_maskz_cvt_roundsd_ss&expand=1363)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_cvt_roundsd_ss&expand=1363)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2ss, ROUNDING = 8))]
@@ -37072,7 +37072,7 @@ pub unsafe fn _mm_maskz_cvt_roundsd_ss<const ROUNDING: i32>(
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundss_si32&expand=1374)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_si32&expand=1374)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
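
The ROUNDING lists repeated in the doc comments combine the standard _MM_FROUND_* constants re-exported by std::arch; the ROUNDING = 8 in the assert_instr attributes is _MM_FROUND_NO_EXC by itself. A quick check of the combinations named above (x86_64 target assumed):

    use std::arch::x86_64::{
        _MM_FROUND_CUR_DIRECTION, _MM_FROUND_NO_EXC, _MM_FROUND_TO_NEAREST_INT,
        _MM_FROUND_TO_ZERO,
    };

    fn main() {
        // "round to nearest, and suppress exceptions"
        assert_eq!(_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC, 0x08);
        // "truncate, and suppress exceptions"
        assert_eq!(_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC, 0x0b);
        // "use MXCSR.RC; see _MM_SET_ROUNDING_MODE"
        assert_eq!(_MM_FROUND_CUR_DIRECTION, 0x04);
    }
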
@@ -37092,7 +37092,7 @@ pub unsafe fn _mm_cvt_roundss_si32<const ROUNDING: i32>(a: __m128) -> i32 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundss_i32&expand=1369)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_i32&expand=1369)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
@@ -37112,7 +37112,7 @@ pub unsafe fn _mm_cvt_roundss_i32<const ROUNDING: i32>(a: __m128) -> i32 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundss_u32&expand=1376)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundss_u32&expand=1376)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi, ROUNDING = 8))]
@@ -37126,7 +37126,7 @@ pub unsafe fn _mm_cvt_roundss_u32<const ROUNDING: i32>(a: __m128) -> u32 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtss_i32&expand=1893)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtss_i32&expand=1893)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
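
With no rounding argument, _mm_cvtss_i32 follows the current MXCSR rounding mode, which defaults to round-to-nearest with ties to even. A scalar model of that default (illustrative; f32::round_ties_even is stable since Rust 1.77):

    fn cvtss_i32_model(a: f32) -> i32 {
        a.round_ties_even() as i32
    }

    fn main() {
        assert_eq!(cvtss_i32_model(2.5), 2); // tie rounds to the even neighbor
        assert_eq!(cvtss_i32_model(3.5), 4);
        assert_eq!(cvtss_i32_model(-1.5), -2);
    }
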
@@ -37136,7 +37136,7 @@ pub unsafe fn _mm_cvtss_i32(a: __m128) -> i32 {
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtss_u32&expand=1901)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtss_u32&expand=1901)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
@@ -37152,7 +37152,7 @@ pub unsafe fn _mm_cvtss_u32(a: __m128) -> u32 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundsd_si32&expand=1359)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_si32&expand=1359)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))]
@@ -37172,7 +37172,7 @@ pub unsafe fn _mm_cvt_roundsd_si32<const ROUNDING: i32>(a: __m128d) -> i32 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundsd_i32&expand=1357)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_i32&expand=1357)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))]
@@ -37192,7 +37192,7 @@ pub unsafe fn _mm_cvt_roundsd_i32<const ROUNDING: i32>(a: __m128d) -> i32 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=cvt_roundsd_u32&expand=1364)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsd_u32&expand=1364)
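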
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi, ROUNDING = 8))]
@@ -37206,7 +37206,7 @@ pub unsafe fn _mm_cvt_roundsd_u32<const ROUNDING: i32>(a: __m128d) -> u32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtsd_i32&expand=1791)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtsd_i32&expand=1791)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
@@ -37216,7 +37216,7 @@ pub unsafe fn _mm_cvtsd_i32(a: __m128d) -> i32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtsd_u32&expand=1799)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtsd_u32&expand=1799)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
@@ -37233,7 +37233,7 @@ pub unsafe fn _mm_cvtsd_u32(a: __m128d) -> u32 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundi32_ss&expand=1312)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundi32_ss&expand=1312)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))]
@@ -37254,7 +37254,7 @@ pub unsafe fn _mm_cvt_roundi32_ss<const ROUNDING: i32>(a: __m128, b: i32) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundsi32_ss&expand=1366)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundsi32_ss&expand=1366)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))]
@@ -37274,7 +37274,7 @@ pub unsafe fn _mm_cvt_roundsi32_ss<const ROUNDING: i32>(a: __m128, b: i32) -> __
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvt_roundu32_ss&expand=1378)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvt_roundu32_ss&expand=1378)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2ss, ROUNDING = 8))]
@@ -37288,7 +37288,7 @@ pub unsafe fn _mm_cvt_roundu32_ss<const ROUNDING: i32>(a: __m128, b: u32) -> __m
/// Convert the signed 32-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvti32_ss&expand=1643)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvti32_ss&expand=1643)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2ss))]
@@ -37300,7 +37300,7 @@ pub unsafe fn _mm_cvti32_ss(a: __m128, b: i32) -> __m128 {
/// Convert the signed 32-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvti32_sd&expand=1642)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvti32_sd&expand=1642)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2sd))]
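
A scalar-array model of the "store the result in the lower element of dst, and copy the upper elements from a" wording used throughout these conversions (illustrative, not the intrinsic):

    fn cvti32_ss_model(a: [f32; 4], b: i32) -> [f32; 4] {
        let mut dst = a;
        dst[0] = b as f32; // only lane 0 receives the converted integer
        dst
    }

    fn main() {
        assert_eq!(cvti32_ss_model([9.0, 8.0, 7.0, 6.0], 5), [5.0, 8.0, 7.0, 6.0]);
    }
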
@@ -37313,7 +37313,7 @@ pub unsafe fn _mm_cvti32_sd(a: __m128d, b: i32) -> __m128d {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtt_roundss_Si32&expand=1936)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundss_si32&expand=1936)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))]
@@ -37328,7 +37328,7 @@ pub unsafe fn _mm_cvtt_roundss_si32<const SAE: i32>(a: __m128) -> i32 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtt_roundss_i32&expand=1934)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundss_i32&expand=1934)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))]
@@ -37343,7 +37343,7 @@ pub unsafe fn _mm_cvtt_roundss_i32<const SAE: i32>(a: __m128) -> i32 {
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtt_roundss_u32&expand=1938)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundss_u32&expand=1938)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi, SAE = 8))]
@@ -37357,7 +37357,7 @@ pub unsafe fn _mm_cvtt_roundss_u32<const SAE: i32>(a: __m128) -> u32 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_i32&expand=2022)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_i32&expand=2022)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
@@ -37367,7 +37367,7 @@ pub unsafe fn _mm_cvttss_i32(a: __m128) -> i32 {
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_u32&expand=2026)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_u32&expand=2026)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
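
The cvtt variants truncate toward zero instead of honoring the current rounding mode; for in-range inputs this matches Rust's `as` casts:

    fn main() {
        assert_eq!(2.9f32 as i32, 2);   // truncation toward zero
        assert_eq!(-2.9f32 as i32, -2);
        assert_eq!(2.9f64 as u32, 2);   // unsigned variant
    }
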
@@ -37378,7 +37378,7 @@ pub unsafe fn _mm_cvttss_u32(a: __m128) -> u32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsd_si32&expand=1930)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_si32&expand=1930)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))]
@@ -37393,7 +37393,7 @@ pub unsafe fn _mm_cvtt_roundsd_si32<const SAE: i32>(a: __m128d) -> i32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsd_i32&expand=1928)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_i32&expand=1928)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))]
@@ -37408,7 +37408,7 @@ pub unsafe fn _mm_cvtt_roundsd_i32<const SAE: i32>(a: __m128d) -> i32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvtt_roundsd_u32&expand=1932)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_cvtt_roundsd_u32&expand=1932)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi, SAE = 8))]
@@ -37422,7 +37422,7 @@ pub unsafe fn _mm_cvtt_roundsd_u32<const SAE: i32>(a: __m128d) -> u32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 32-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_i32&expand=2015)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_i32&expand=2015)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
@@ -37432,7 +37432,7 @@ pub unsafe fn _mm_cvttsd_i32(a: __m128d) -> i32 {
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 32-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_u32&expand=2020)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_u32&expand=2020)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
@@ -37442,7 +37442,7 @@ pub unsafe fn _mm_cvttsd_u32(a: __m128d) -> u32 {
/// Convert the unsigned 32-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_ss&expand=2032)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu32_ss&expand=2032)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2ss))]
@@ -37454,7 +37454,7 @@ pub unsafe fn _mm_cvtu32_ss(a: __m128, b: u32) -> __m128 {
/// Convert the unsigned 32-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sd&expand=2031)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu32_sd&expand=2031)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2sd))]
@@ -37467,13 +37467,13 @@ pub unsafe fn _mm_cvtu32_sd(a: __m128d, b: u32) -> __m128d {
/// Compare the lower single-precision (32-bit) floating-point element in a and b based on the comparison operand specified by imm8, and return the boolean result (0 or 1).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_round_ss&expand=1175)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comi_round_ss&expand=1175)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomiss
#[rustc_legacy_const_generics(2, 3)]
pub unsafe fn _mm_comi_round_ss<const IMM5: i32, const SAE: i32>(a: __m128, b: __m128) -> i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let a = a.as_f32x4();
let b = b.as_f32x4();
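
The IMM5 operand of _mm_comi_round_ss selects one of the 32 comparison predicates, which is exactly why the assertion above checks 5 bits. The predicate encodings are the _CMP_* constants from std::arch::x86_64:

    use std::arch::x86_64::{_CMP_EQ_OQ, _CMP_LT_OS, _CMP_NLE_UQ};

    fn main() {
        assert_eq!(_CMP_EQ_OQ, 0x00);
        assert_eq!(_CMP_LT_OS, 0x01);
        assert!(_CMP_NLE_UQ < 32); // every predicate passes static_assert_uimm_bits!(IMM5, 5)
    }
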
@@ -37484,13 +37484,13 @@ pub unsafe fn _mm_comi_round_ss<const IMM5: i32, const SAE: i32>(a: __m128, b: _
/// Compare the lower double-precision (64-bit) floating-point element in a and b based on the comparison operand specified by imm8, and return the boolean result (0 or 1).\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_round_sd&expand=1174)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comi_round_sd&expand=1174)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcmp, IMM5 = 5, SAE = 4))] //should be vcomisd
#[rustc_legacy_const_generics(2, 3)]
pub unsafe fn _mm_comi_round_sd<const IMM5: i32, const SAE: i32>(a: __m128d, b: __m128d) -> i32 {
- static_assert_imm5!(IMM5);
+ static_assert_uimm_bits!(IMM5, 5);
static_assert_mantissas_sae!(SAE);
let a = a.as_f64x2();
let b = b.as_f64x2();
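
A scalar model of the comi result, which is a plain 0/1 integer from comparing the lower elements (equality predicate shown; illustrative only):

    fn comi_eq_model(a: f64, b: f64) -> i32 {
        i32::from(a == b) // 1 when the predicate holds for the lower elements
    }

    fn main() {
        assert_eq!(comi_eq_model(1.0, 1.0), 1);
        assert_eq!(comi_eq_model(1.0, 2.0), 0);
    }
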
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512ifma.rs b/library/stdarch/crates/core_arch/src/x86/avx512ifma.rs
index 26aa0320f..128f0db25 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512ifma.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512ifma.rs
@@ -37,7 +37,7 @@ pub unsafe fn _mm512_madd52lo_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m51
/// corresponding unsigned 64-bit integer in `a`, and store the
/// results in `dst`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL&expand=3485)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL&expand=3485)
#[inline]
#[target_feature(enable = "avx512ifma,avx512vl")]
#[cfg_attr(test, assert_instr(vpmadd52huq))]
@@ -51,7 +51,7 @@ pub unsafe fn _mm256_madd52hi_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m25
/// corresponding unsigned 64-bit integer in `a`, and store the
/// results in `dst`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL&expand=3494)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=vpmadd52&avx512techs=AVX512IFMA52,AVX512VL&expand=3494)
#[inline]
#[target_feature(enable = "avx512ifma,avx512vl")]
#[cfg_attr(test, assert_instr(vpmadd52luq))]
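
A scalar model of the vpmadd52 operations these wrappers emit: multiply the low 52 bits of two unsigned 64-bit elements, then add the low (luq) or high (huq) 52 bits of the 104-bit product to the accumulator. Illustrative u128 arithmetic, not the intrinsic:

    const M52: u64 = (1 << 52) - 1;

    fn madd52lo_model(a: u64, b: u64, c: u64) -> u64 {
        let p = u128::from(b & M52) * u128::from(c & M52);
        a.wrapping_add(p as u64 & M52)
    }

    fn madd52hi_model(a: u64, b: u64, c: u64) -> u64 {
        let p = u128::from(b & M52) * u128::from(c & M52);
        a.wrapping_add((p >> 52) as u64 & M52)
    }

    fn main() {
        assert_eq!(madd52lo_model(1, 3, 4), 13);
        assert_eq!(madd52hi_model(0, 1 << 51, 2), 1); // product is exactly 2^52
    }
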
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vbmi.rs b/library/stdarch/crates/core_arch/src/x86/avx512vbmi.rs
index f0ff75162..cd3800d38 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512vbmi.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512vbmi.rs
@@ -5,7 +5,7 @@ use stdarch_test::assert_instr;
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_epi8&expand=4262)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutex2var_epi8&expand=4262)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b
@@ -15,7 +15,7 @@ pub unsafe fn _mm512_permutex2var_epi8(a: __m512i, idx: __m512i, b: __m512i) ->
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutex2var_epi8&expand=4259)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutex2var_epi8&expand=4259)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpermt2b))]
@@ -31,7 +31,7 @@ pub unsafe fn _mm512_mask_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutex2var_epi8&expand=4261)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutex2var_epi8&expand=4261)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b
@@ -48,7 +48,7 @@ pub unsafe fn _mm512_maskz_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask2_permutex2var_epi8&expand=4260)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask2_permutex2var_epi8&expand=4260)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpermi2b))]
@@ -64,7 +64,7 @@ pub unsafe fn _mm512_mask2_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_epi8&expand=4258)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutex2var_epi8&expand=4258)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b
@@ -74,7 +74,7 @@ pub unsafe fn _mm256_permutex2var_epi8(a: __m256i, idx: __m256i, b: __m256i) ->
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutex2var_epi8&expand=4255)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutex2var_epi8&expand=4255)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2b))]
@@ -90,7 +90,7 @@ pub unsafe fn _mm256_mask_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutex2var_epi8&expand=4257)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutex2var_epi8&expand=4257)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b
@@ -107,7 +107,7 @@ pub unsafe fn _mm256_maskz_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask2_permutex2var_epi8&expand=4256)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask2_permutex2var_epi8&expand=4256)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2b))]
@@ -123,7 +123,7 @@ pub unsafe fn _mm256_mask2_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_epi8&expand=4254)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutex2var_epi8&expand=4254)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b
@@ -133,7 +133,7 @@ pub unsafe fn _mm_permutex2var_epi8(a: __m128i, idx: __m128i, b: __m128i) -> __m
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutex2var_epi8&expand=4251)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutex2var_epi8&expand=4251)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermt2b))]
@@ -149,7 +149,7 @@ pub unsafe fn _mm_mask_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutex2var_epi8&expand=4253)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutex2var_epi8&expand=4253)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vperm))] //should be vpermi2b
@@ -166,7 +166,7 @@ pub unsafe fn _mm_maskz_permutex2var_epi8(
/// Shuffle 8-bit integers in a and b across lanes using the corresponding selector and index in idx, and store the results in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask2_permutex2var_epi8&expand=4252)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask2_permutex2var_epi8&expand=4252)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermi2b))]
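
A scalar model of the permutex2var shuffles above: each output byte is picked from the concatenation of a and b by the corresponding selector in idx. Shortened to 4-byte vectors for illustration; the real epi8 forms use the selector's low index bits plus a table-select bit:

    fn permutex2var_model(a: [u8; 4], idx: [u8; 4], b: [u8; 4]) -> [u8; 4] {
        let mut dst = [0u8; 4];
        for (d, &sel) in dst.iter_mut().zip(idx.iter()) {
            let i = sel as usize % 8; // low bits index into a ++ b
            *d = if i < 4 { a[i] } else { b[i - 4] };
        }
        dst
    }

    fn main() {
        assert_eq!(
            permutex2var_model([10, 11, 12, 13], [0, 4, 1, 5], [20, 21, 22, 23]),
            [10, 20, 11, 21]
        );
    }
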
@@ -182,7 +182,7 @@ pub unsafe fn _mm_mask2_permutex2var_epi8(
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_epi8&expand=4316)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_permutexvar_epi8&expand=4316)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -192,7 +192,7 @@ pub unsafe fn _mm512_permutexvar_epi8(idx: __m512i, a: __m512i) -> __m512i {
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_permutexvar_epi8&expand=4314)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_permutexvar_epi8&expand=4314)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -208,7 +208,7 @@ pub unsafe fn _mm512_mask_permutexvar_epi8(
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_permutexvar_epi8&expand=4315)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_permutexvar_epi8&expand=4315)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -220,7 +220,7 @@ pub unsafe fn _mm512_maskz_permutexvar_epi8(k: __mmask64, idx: __m512i, a: __m51
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_epi8&expand=4313)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_permutexvar_epi8&expand=4313)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -230,7 +230,7 @@ pub unsafe fn _mm256_permutexvar_epi8(idx: __m256i, a: __m256i) -> __m256i {
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_permutexvar_epi8&expand=4311)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_permutexvar_epi8&expand=4311)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -246,7 +246,7 @@ pub unsafe fn _mm256_mask_permutexvar_epi8(
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_permutexvar_epi8&expand=4312)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_permutexvar_epi8&expand=4312)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -258,7 +258,7 @@ pub unsafe fn _mm256_maskz_permutexvar_epi8(k: __mmask32, idx: __m256i, a: __m25
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_epi8&expand=4310)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_permutexvar_epi8&expand=4310)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -268,7 +268,7 @@ pub unsafe fn _mm_permutexvar_epi8(idx: __m128i, a: __m128i) -> __m128i {
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_permutexvar_epi8&expand=4308)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_permutexvar_epi8&expand=4308)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -284,7 +284,7 @@ pub unsafe fn _mm_mask_permutexvar_epi8(
/// Shuffle 8-bit integers in a across lanes using the corresponding index in idx, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_permutexvar_epi8&expand=4309)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_permutexvar_epi8&expand=4309)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpermb))]
@@ -296,7 +296,7 @@ pub unsafe fn _mm_maskz_permutexvar_epi8(k: __mmask16, idx: __m128i, a: __m128i)
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_multishift_epi64_epi8&expand=4026)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_multishift_epi64_epi8&expand=4026)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -306,7 +306,7 @@ pub unsafe fn _mm512_multishift_epi64_epi8(a: __m512i, b: __m512i) -> __m512i {
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_multishift_epi64_epi8&expand=4024)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_multishift_epi64_epi8&expand=4024)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -322,7 +322,7 @@ pub unsafe fn _mm512_mask_multishift_epi64_epi8(
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_multishift_epi64_epi8&expand=4025)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_multishift_epi64_epi8&expand=4025)
#[inline]
#[target_feature(enable = "avx512vbmi")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -334,7 +334,7 @@ pub unsafe fn _mm512_maskz_multishift_epi64_epi8(k: __mmask64, a: __m512i, b: __
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_multishift_epi64_epi8&expand=4023)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_multishift_epi64_epi8&expand=4023)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -344,7 +344,7 @@ pub unsafe fn _mm256_multishift_epi64_epi8(a: __m256i, b: __m256i) -> __m256i {
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_multishift_epi64_epi8&expand=4021)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_multishift_epi64_epi8&expand=4021)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -360,7 +360,7 @@ pub unsafe fn _mm256_mask_multishift_epi64_epi8(
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_multishift_epi64_epi8&expand=4022)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_multishift_epi64_epi8&expand=4022)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -382,7 +382,7 @@ pub unsafe fn _mm_multishift_epi64_epi8(a: __m128i, b: __m128i) -> __m128i {
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_multishift_epi64_epi8&expand=4018)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_multishift_epi64_epi8&expand=4018)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
@@ -398,7 +398,7 @@ pub unsafe fn _mm_mask_multishift_epi64_epi8(
/// For each 64-bit element in b, select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of a, and store the 8 assembled bytes to the corresponding 64-bit element of dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_multishift_epi64_epi8&expand=4019)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_multishift_epi64_epi8&expand=4019)
#[inline]
#[target_feature(enable = "avx512vbmi,avx512vl")]
#[cfg_attr(test, assert_instr(vpmultishiftqb))]
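
A scalar model of vpmultishiftqb for one 64-bit lane: each control byte in a gives a bit offset (taken mod 64), and the output byte is the 8 bits of b starting at that offset with wraparound, i.e. a rotate-right followed by a low-byte extract. Illustrative only:

    fn multishift_model(ctrl: [u8; 8], data: u64) -> [u8; 8] {
        let mut out = [0u8; 8];
        for (o, &c) in out.iter_mut().zip(ctrl.iter()) {
            // 8 bits of `data` starting at bit offset c (mod 64)
            *o = data.rotate_right(u32::from(c) % 64) as u8;
        }
        out
    }

    fn main() {
        let data = 0x0123_4567_89AB_CDEFu64;
        let bytes = multishift_model([0, 8, 16, 24, 32, 40, 48, 56], data);
        // byte-aligned offsets reproduce the lane unchanged
        assert_eq!(u64::from_le_bytes(bytes), data);
    }
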
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs b/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs
index 1c81840ba..404443e9e 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512vbmi2.rs
@@ -8,7 +8,7 @@ use stdarch_test::assert_instr;
/// Load contiguous active 16-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expandloadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")]
pub unsafe fn _mm512_mask_expandloadu_epi16(
@@ -29,7 +29,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi16(
/// Load contiguous active 16-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expandloadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")]
pub unsafe fn _mm512_maskz_expandloadu_epi16(k: __mmask32, mem_addr: *const i16) -> __m512i {
@@ -46,7 +46,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi16(k: __mmask32, mem_addr: *const i16)
/// Load contiguous active 16-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expandloadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx")]
pub unsafe fn _mm256_mask_expandloadu_epi16(
@@ -67,7 +67,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi16(
/// Load contiguous active 16-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expandloadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx")]
pub unsafe fn _mm256_maskz_expandloadu_epi16(k: __mmask16, mem_addr: *const i16) -> __m256i {
@@ -84,7 +84,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi16(k: __mmask16, mem_addr: *const i16)
/// Load contiguous active 16-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expandloadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_expandloadu_epi16(
@@ -105,7 +105,7 @@ pub unsafe fn _mm_mask_expandloadu_epi16(
/// Load contiguous active 16-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expandloadu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi16)
#[inline]
#[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_expandloadu_epi16(k: __mmask8, mem_addr: *const i16) -> __m128i {
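
A scalar model of the masked expand-loads above: source elements are consumed contiguously from memory, one per set mask bit, while unselected lanes keep src (writemask) or become zero (zeromask). Zeromask form shown, shortened to four lanes for illustration:

    fn maskz_expandloadu_model(k: u8, mem: &[i16]) -> [i16; 4] {
        let mut dst = [0i16; 4];
        let mut next = 0;
        for (i, d) in dst.iter_mut().enumerate() {
            if k & (1 << i) != 0 {
                *d = mem[next]; // reads stay contiguous no matter which bits are set
                next += 1;
            }
        }
        dst
    }

    fn main() {
        assert_eq!(maskz_expandloadu_model(0b1010, &[7, 8]), [0, 7, 0, 8]);
    }
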
@@ -122,7 +122,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi16(k: __mmask8, mem_addr: *const i16) ->
/// Load contiguous active 8-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expandloadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expandloadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")]
pub unsafe fn _mm512_mask_expandloadu_epi8(
@@ -143,7 +143,7 @@ pub unsafe fn _mm512_mask_expandloadu_epi8(
/// Load contiguous active 8-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expandloadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expandloadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vbmi2")]
pub unsafe fn _mm512_maskz_expandloadu_epi8(k: __mmask64, mem_addr: *const i8) -> __m512i {
@@ -160,7 +160,7 @@ pub unsafe fn _mm512_maskz_expandloadu_epi8(k: __mmask64, mem_addr: *const i8) -
/// Load contiguous active 8-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expandloadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expandloadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vbmi2,avx512vl,avx")]
pub unsafe fn _mm256_mask_expandloadu_epi8(
@@ -181,7 +181,7 @@ pub unsafe fn _mm256_mask_expandloadu_epi8(
/// Load contiguous active 8-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expandloadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expandloadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512bw,avx512vbmi2,avx512vl,avx")]
pub unsafe fn _mm256_maskz_expandloadu_epi8(k: __mmask32, mem_addr: *const i8) -> __m256i {
@@ -198,7 +198,7 @@ pub unsafe fn _mm256_maskz_expandloadu_epi8(k: __mmask32, mem_addr: *const i8) -
/// Load contiguous active 8-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expandloadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expandloadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")]
pub unsafe fn _mm_mask_expandloadu_epi8(
@@ -219,7 +219,7 @@ pub unsafe fn _mm_mask_expandloadu_epi8(
/// Load contiguous active 8-bit integers from unaligned memory at mem_addr (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expandloadu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expandloadu_epi8)
#[inline]
#[target_feature(enable = "avx512f,avx512vbmi2,avx512vl,avx,sse")]
pub unsafe fn _mm_maskz_expandloadu_epi8(k: __mmask16, mem_addr: *const i8) -> __m128i {
@@ -236,7 +236,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi8(k: __mmask16, mem_addr: *const i8) -> _
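// A minimal sketch of the expand-load semantics documented above, assuming a
// nightly toolchain that exposes these AVX-512 intrinsics and a CPU with
// avx512vbmi2/avx512vl; the helper below is hypothetical. Active lanes (mask
// bits set) are filled from *consecutive* memory positions; inactive lanes
// come from `src` (mask form) or are zeroed (maskz form).
use core::arch::x86_64::*;

#[target_feature(enable = "avx512vbmi2,avx512vl")]
unsafe fn expandloadu_demo() -> __m128i {
    let mem: [i16; 8] = [10, 20, 30, 40, 50, 60, 70, 80];
    let src = _mm_set1_epi16(-1);
    // Mask 0b0101 activates lanes 0 and 2, which take mem[0] and mem[1]:
    // dst = [10, -1, 20, -1, -1, -1, -1, -1] (lane 0 listed first)
    _mm_mask_expandloadu_epi16(src, 0b0000_0101, mem.as_ptr())
}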
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compressstoreu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi16)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -246,7 +246,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask32,
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compressstoreu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi16)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -256,7 +256,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask16,
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compressstoreu_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi16)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -266,7 +266,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask8, a:
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compressstoreu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compressstoreu_epi8)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -276,7 +276,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask64,
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compressstoreu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compressstoreu_epi8)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -286,7 +286,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask32,
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compressstoreu_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compressstoreu_epi8)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -296,7 +296,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask16, a:
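// A minimal sketch of compress-store, the memory-writing inverse of the
// expand-loads above: only the active lanes are written, packed contiguously
// at base_addr, and trailing destination bytes are left untouched. The helper
// is hypothetical, under the same toolchain/CPU assumptions as the previous
// sketch; note this file declares base_addr as *mut u8, hence the cast.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512vbmi2,avx512vl")]
unsafe fn compressstoreu_demo() -> [i16; 8] {
    let a = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1); // lane 0 holds 1
    let mut out = [0i16; 8];
    // Mask 0b1010 selects lanes 1 and 3 (values 2 and 4):
    // out = [2, 4, 0, 0, 0, 0, 0, 0]
    _mm_mask_compressstoreu_epi16(out.as_mut_ptr() as *mut u8, 0b0000_1010, a);
    out
}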
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compress_epi16&expand=1192)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi16&expand=1192)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -306,7 +306,7 @@ pub unsafe fn _mm512_mask_compress_epi16(src: __m512i, k: __mmask32, a: __m512i)
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_compress_epi16&expand=1193)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi16&expand=1193)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -320,7 +320,7 @@ pub unsafe fn _mm512_maskz_compress_epi16(k: __mmask32, a: __m512i) -> __m512i {
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compress_epi16&expand=1190)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi16&expand=1190)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -330,7 +330,7 @@ pub unsafe fn _mm256_mask_compress_epi16(src: __m256i, k: __mmask16, a: __m256i)
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_compress_epi16&expand=1191)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi16&expand=1191)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -344,7 +344,7 @@ pub unsafe fn _mm256_maskz_compress_epi16(k: __mmask16, a: __m256i) -> __m256i {
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compress_epi16&expand=1188)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi16&expand=1188)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -354,7 +354,7 @@ pub unsafe fn _mm_mask_compress_epi16(src: __m128i, k: __mmask8, a: __m128i) ->
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_compress_epi16&expand=1189)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi16&expand=1189)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressw))]
@@ -368,7 +368,7 @@ pub unsafe fn _mm_maskz_compress_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_compress_epi8&expand=1210)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_compress_epi8&expand=1210)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -378,7 +378,7 @@ pub unsafe fn _mm512_mask_compress_epi8(src: __m512i, k: __mmask64, a: __m512i)
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_compress_epi8&expand=1211)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_compress_epi8&expand=1211)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -392,7 +392,7 @@ pub unsafe fn _mm512_maskz_compress_epi8(k: __mmask64, a: __m512i) -> __m512i {
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_compress_epi8&expand=1208)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_compress_epi8&expand=1208)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -402,7 +402,7 @@ pub unsafe fn _mm256_mask_compress_epi8(src: __m256i, k: __mmask32, a: __m256i)
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_compress_epi8&expand=1209)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_compress_epi8&expand=1209)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -416,7 +416,7 @@ pub unsafe fn _mm256_maskz_compress_epi8(k: __mmask32, a: __m256i) -> __m256i {
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_compress_epi8&expand=1206)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_compress_epi8&expand=1206)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -426,7 +426,7 @@ pub unsafe fn _mm_mask_compress_epi8(src: __m128i, k: __mmask16, a: __m128i) ->
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in zeromask k) to dst, and set the remaining elements to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_compress_epi8&expand=1207)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_compress_epi8&expand=1207)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpcompressb))]
@@ -440,7 +440,7 @@ pub unsafe fn _mm_maskz_compress_epi8(k: __mmask16, a: __m128i) -> __m128i {
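// A minimal sketch of the register-to-register compress described above: it
// packs the active lanes toward lane 0 without touching memory; the maskz
// form zeroes the tail, the mask form fills it from src. Hypothetical helper,
// same assumptions as the earlier sketches.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512vbmi2,avx512vl")]
unsafe fn compress_demo() -> __m128i {
    let a = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1); // lane 0 holds 1
    // Lanes 1 and 3 active (values 2 and 4): dst = [2, 4, 0, 0, 0, 0, 0, 0]
    _mm_maskz_compress_epi16(0b0000_1010, a)
}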
/// Load contiguous active 16-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expand_epi16&expand=2310)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi16&expand=2310)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpexpandw))]
@@ -450,7 +450,7 @@ pub unsafe fn _mm512_mask_expand_epi16(src: __m512i, k: __mmask32, a: __m512i) -
/// Load contiguous active 16-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expand_epi16&expand=2311)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi16&expand=2311)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpexpandw))]
@@ -464,7 +464,7 @@ pub unsafe fn _mm512_maskz_expand_epi16(k: __mmask32, a: __m512i) -> __m512i {
/// Load contiguous active 16-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expand_epi16&expand=2308)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi16&expand=2308)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandw))]
@@ -474,7 +474,7 @@ pub unsafe fn _mm256_mask_expand_epi16(src: __m256i, k: __mmask16, a: __m256i) -
/// Load contiguous active 16-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expand_epi16&expand=2309)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi16&expand=2309)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandw))]
@@ -488,7 +488,7 @@ pub unsafe fn _mm256_maskz_expand_epi16(k: __mmask16, a: __m256i) -> __m256i {
/// Load contiguous active 16-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expand_epi16&expand=2306)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi16&expand=2306)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandw))]
@@ -498,7 +498,7 @@ pub unsafe fn _mm_mask_expand_epi16(src: __m128i, k: __mmask8, a: __m128i) -> __
/// Load contiguous active 16-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expand_epi16&expand=2307)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi16&expand=2307)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandw))]
@@ -512,7 +512,7 @@ pub unsafe fn _mm_maskz_expand_epi16(k: __mmask8, a: __m128i) -> __m128i {
/// Load contiguous active 8-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_expand_epi8&expand=2328)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_expand_epi8&expand=2328)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpexpandb))]
@@ -522,7 +522,7 @@ pub unsafe fn _mm512_mask_expand_epi8(src: __m512i, k: __mmask64, a: __m512i) ->
/// Load contiguous active 8-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_expand_epi8&expand=2329)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_expand_epi8&expand=2329)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpexpandb))]
@@ -536,7 +536,7 @@ pub unsafe fn _mm512_maskz_expand_epi8(k: __mmask64, a: __m512i) -> __m512i {
/// Load contiguous active 8-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_expand_epi8&expand=2326)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_expand_epi8&expand=2326)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandb))]
@@ -546,7 +546,7 @@ pub unsafe fn _mm256_mask_expand_epi8(src: __m256i, k: __mmask32, a: __m256i) ->
/// Load contiguous active 8-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_expand_epi8&expand=2327)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_expand_epi8&expand=2327)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandb))]
@@ -560,7 +560,7 @@ pub unsafe fn _mm256_maskz_expand_epi8(k: __mmask32, a: __m256i) -> __m256i {
/// Load contiguous active 8-bit integers from a (those with their respective bit set in mask k), and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_expand_epi8&expand=2324)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_expand_epi8&expand=2324)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandb))]
@@ -570,7 +570,7 @@ pub unsafe fn _mm_mask_expand_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __
/// Load contiguous active 8-bit integers from a (those with their respective bit set in mask k), and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_expand_epi8&expand=2325)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_expand_epi8&expand=2325)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpexpandb))]
@@ -584,7 +584,7 @@ pub unsafe fn _mm_maskz_expand_epi8(k: __mmask16, a: __m128i) -> __m128i {
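// A minimal sketch of the in-register expand described above, which is
// compress run backwards: the low lanes of `a` are scattered, in order, into
// the active destination lanes; inactive lanes are zeroed (maskz) or copied
// from src (mask form). Hypothetical helper, same assumptions as above.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512vbmi2,avx512vl")]
unsafe fn expand_demo() -> __m128i {
    let a = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1); // lane 0 holds 1
    // Lanes 0 and 2 active: they receive a[0] and a[1]:
    // dst = [1, 0, 2, 0, 0, 0, 0, 0]
    _mm_maskz_expand_epi16(0b0000_0101, a)
}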
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shldv_epi64&expand=5087)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldv_epi64&expand=5087)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -594,7 +594,7 @@ pub unsafe fn _mm512_shldv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shldv_epi64&expand=5085)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldv_epi64&expand=5085)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -605,7 +605,7 @@ pub unsafe fn _mm512_mask_shldv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shldv_epi64&expand=5086)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldv_epi64&expand=5086)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -617,7 +617,7 @@ pub unsafe fn _mm512_maskz_shldv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: _
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shldv_epi64&expand=5084)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldv_epi64&expand=5084)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -627,7 +627,7 @@ pub unsafe fn _mm256_shldv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shldv_epi64&expand=5082)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldv_epi64&expand=5082)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -638,7 +638,7 @@ pub unsafe fn _mm256_mask_shldv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shldv_epi64&expand=5083)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldv_epi64&expand=5083)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -650,7 +650,7 @@ pub unsafe fn _mm256_maskz_shldv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: _
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shldv_epi64&expand=5081)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldv_epi64&expand=5081)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -660,7 +660,7 @@ pub unsafe fn _mm_shldv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i {
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shldv_epi64&expand=5079)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldv_epi64&expand=5079)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -671,7 +671,7 @@ pub unsafe fn _mm_mask_shldv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m12
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shldv_epi64&expand=5080)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldv_epi64&expand=5080)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvq))]
@@ -683,7 +683,7 @@ pub unsafe fn _mm_maskz_shldv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m1
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shldv_epi32&expand=5078)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldv_epi32&expand=5078)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -693,7 +693,7 @@ pub unsafe fn _mm512_shldv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shldv_epi32&expand=5076)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldv_epi32&expand=5076)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -704,7 +704,7 @@ pub unsafe fn _mm512_mask_shldv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: _
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shldv_epi32&expand=5077)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldv_epi32&expand=5077)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -721,7 +721,7 @@ pub unsafe fn _mm512_maskz_shldv_epi32(
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shldv_epi32&expand=5075)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldv_epi32&expand=5075)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -731,7 +731,7 @@ pub unsafe fn _mm256_shldv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shldv_epi32&expand=5073)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldv_epi32&expand=5073)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -742,7 +742,7 @@ pub unsafe fn _mm256_mask_shldv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shldv_epi32&expand=5074)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldv_epi32&expand=5074)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -754,7 +754,7 @@ pub unsafe fn _mm256_maskz_shldv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: _
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shldv_epi32&expand=5072)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldv_epi32&expand=5072)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -764,7 +764,7 @@ pub unsafe fn _mm_shldv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i {
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shldv_epi32&expand=5070)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldv_epi32&expand=5070)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -775,7 +775,7 @@ pub unsafe fn _mm_mask_shldv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m12
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shldv_epi32&expand=5071)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldv_epi32&expand=5071)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvd))]
@@ -787,7 +787,7 @@ pub unsafe fn _mm_maskz_shldv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m1
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shldv_epi16&expand=5069)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldv_epi16&expand=5069)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -797,7 +797,7 @@ pub unsafe fn _mm512_shldv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shldv_epi16&expand=5067)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldv_epi16&expand=5067)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -808,7 +808,7 @@ pub unsafe fn _mm512_mask_shldv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: _
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shldv_epi16&expand=5068)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldv_epi16&expand=5068)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -825,7 +825,7 @@ pub unsafe fn _mm512_maskz_shldv_epi16(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shldv_epi16&expand=5066)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldv_epi16&expand=5066)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -835,7 +835,7 @@ pub unsafe fn _mm256_shldv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shldv_epi16&expand=5064)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldv_epi16&expand=5064)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -846,7 +846,7 @@ pub unsafe fn _mm256_mask_shldv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: _
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shldv_epi16&expand=5065)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldv_epi16&expand=5065)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -863,7 +863,7 @@ pub unsafe fn _mm256_maskz_shldv_epi16(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shldv_epi16&expand=5063)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldv_epi16&expand=5063)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -873,7 +873,7 @@ pub unsafe fn _mm_shldv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i {
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shldv_epi16&expand=5061)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldv_epi16&expand=5061)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -884,7 +884,7 @@ pub unsafe fn _mm_mask_shldv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m12
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of c, and store the upper 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shldv_epi16&expand=5062)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldv_epi16&expand=5062)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldvw))]
@@ -896,7 +896,7 @@ pub unsafe fn _mm_maskz_shldv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m1
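// A minimal sketch of the shldv family documented above: a per-lane funnel
// shift left. Conceptually, per Intel's description,
//   dst[i] = upper_half(((a[i] : b[i]) as double-width) << (c[i] % width)),
// so bits leaving b's top end flow into a's bottom end. Hypothetical helper,
// same assumptions as the earlier sketches.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512vbmi2,avx512vl")]
unsafe fn shldv_demo() -> __m128i {
    let a = _mm_set1_epi64x(1);
    let b = _mm_set1_epi64x(i64::MIN); // only the top bit set
    let c = _mm_set1_epi64x(1);
    // Each lane: (1 << 1) | (top bit of b) = 3
    _mm_shldv_epi64(a, b, c)
}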
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shrdv_epi64&expand=5141)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdv_epi64&expand=5141)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -906,7 +906,7 @@ pub unsafe fn _mm512_shrdv_epi64(a: __m512i, b: __m512i, c: __m512i) -> __m512i
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shrdv_epi64&expand=5139)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdv_epi64&expand=5139)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -917,7 +917,7 @@ pub unsafe fn _mm512_mask_shrdv_epi64(a: __m512i, k: __mmask8, b: __m512i, c: __
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shrdv_epi64&expand=5140)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdv_epi64&expand=5140)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -929,7 +929,7 @@ pub unsafe fn _mm512_maskz_shrdv_epi64(k: __mmask8, a: __m512i, b: __m512i, c: _
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shrdv_epi64&expand=5138)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdv_epi64&expand=5138)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -939,7 +939,7 @@ pub unsafe fn _mm256_shrdv_epi64(a: __m256i, b: __m256i, c: __m256i) -> __m256i
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shrdv_epi64&expand=5136)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdv_epi64&expand=5136)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -950,7 +950,7 @@ pub unsafe fn _mm256_mask_shrdv_epi64(a: __m256i, k: __mmask8, b: __m256i, c: __
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shrdv_epi64&expand=5137)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdv_epi64&expand=5137)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -962,7 +962,7 @@ pub unsafe fn _mm256_maskz_shrdv_epi64(k: __mmask8, a: __m256i, b: __m256i, c: _
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shrdv_epi64&expand=5135)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdv_epi64&expand=5135)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -972,7 +972,7 @@ pub unsafe fn _mm_shrdv_epi64(a: __m128i, b: __m128i, c: __m128i) -> __m128i {
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shrdv_epi64&expand=5133)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdv_epi64&expand=5133)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -983,7 +983,7 @@ pub unsafe fn _mm_mask_shrdv_epi64(a: __m128i, k: __mmask8, b: __m128i, c: __m12
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shrdv_epi64&expand=5134)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdv_epi64&expand=5134)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvq))]
@@ -995,7 +995,7 @@ pub unsafe fn _mm_maskz_shrdv_epi64(k: __mmask8, a: __m128i, b: __m128i, c: __m1
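// A minimal sketch of the shrdv family, the mirror image of shldv: the
// concatenation (b : a) is shifted right and the lower half is kept, so the
// bits shifted in at a's top end come from b's low bits. Hypothetical helper,
// same assumptions as the earlier sketches.
use core::arch::x86_64::*;

#[target_feature(enable = "avx512vbmi2,avx512vl")]
unsafe fn shrdv_demo() -> __m128i {
    let a = _mm_set1_epi64x(2);
    let b = _mm_set1_epi64x(1);
    let c = _mm_set1_epi64x(1);
    // Each lane: (2 >> 1) | (1 << 63) = 0x8000_0000_0000_0001
    _mm_shrdv_epi64(a, b, c)
}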
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shrdv_epi32&expand=5132)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdv_epi32&expand=5132)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1005,7 +1005,7 @@ pub unsafe fn _mm512_shrdv_epi32(a: __m512i, b: __m512i, c: __m512i) -> __m512i
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shrdv_epi32&expand=5130)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdv_epi32&expand=5130)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1016,7 +1016,7 @@ pub unsafe fn _mm512_mask_shrdv_epi32(a: __m512i, k: __mmask16, b: __m512i, c: _
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shrdv_epi32&expand=5131)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdv_epi32&expand=5131)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1033,7 +1033,7 @@ pub unsafe fn _mm512_maskz_shrdv_epi32(
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shrdv_epi32&expand=5129)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdv_epi32&expand=5129)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1043,7 +1043,7 @@ pub unsafe fn _mm256_shrdv_epi32(a: __m256i, b: __m256i, c: __m256i) -> __m256i
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shrdv_epi32&expand=5127)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdv_epi32&expand=5127)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1054,7 +1054,7 @@ pub unsafe fn _mm256_mask_shrdv_epi32(a: __m256i, k: __mmask8, b: __m256i, c: __
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shrdv_epi32&expand=5128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdv_epi32&expand=5128)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1066,7 +1066,7 @@ pub unsafe fn _mm256_maskz_shrdv_epi32(k: __mmask8, a: __m256i, b: __m256i, c: _
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shrdv_epi32&expand=5126)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdv_epi32&expand=5126)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1076,7 +1076,7 @@ pub unsafe fn _mm_shrdv_epi32(a: __m128i, b: __m128i, c: __m128i) -> __m128i {
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shrdv_epi32&expand=5124)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdv_epi32&expand=5124)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1087,7 +1087,7 @@ pub unsafe fn _mm_mask_shrdv_epi32(a: __m128i, k: __mmask8, b: __m128i, c: __m12
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shrdv_epi32&expand=5125)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdv_epi32&expand=5125)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvd))]
@@ -1099,7 +1099,7 @@ pub unsafe fn _mm_maskz_shrdv_epi32(k: __mmask8, a: __m128i, b: __m128i, c: __m1
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shrdv_epi16&expand=5123)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdv_epi16&expand=5123)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1109,7 +1109,7 @@ pub unsafe fn _mm512_shrdv_epi16(a: __m512i, b: __m512i, c: __m512i) -> __m512i
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shrdv_epi16&expand=5121)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdv_epi16&expand=5121)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1120,7 +1120,7 @@ pub unsafe fn _mm512_mask_shrdv_epi16(a: __m512i, k: __mmask32, b: __m512i, c: _
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shrdv_epi16&expand=5122)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdv_epi16&expand=5122)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1137,7 +1137,7 @@ pub unsafe fn _mm512_maskz_shrdv_epi16(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shrdv_epi16&expand=5120)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdv_epi16&expand=5120)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1147,7 +1147,7 @@ pub unsafe fn _mm256_shrdv_epi16(a: __m256i, b: __m256i, c: __m256i) -> __m256i
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shrdv_epi16&expand=5118)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdv_epi16&expand=5118)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1158,7 +1158,7 @@ pub unsafe fn _mm256_mask_shrdv_epi16(a: __m256i, k: __mmask16, b: __m256i, c: _
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shrdv_epi16&expand=5119)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdv_epi16&expand=5119)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1175,7 +1175,7 @@ pub unsafe fn _mm256_maskz_shrdv_epi16(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shrdv_epi16&expand=5117)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdv_epi16&expand=5117)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1185,7 +1185,7 @@ pub unsafe fn _mm_shrdv_epi16(a: __m128i, b: __m128i, c: __m128i) -> __m128i {
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst using writemask k (elements are copied from a when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shrdv_epi16&expand=5115)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdv_epi16&expand=5115)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
@@ -1196,7 +1196,7 @@ pub unsafe fn _mm_mask_shrdv_epi16(a: __m128i, k: __mmask8, b: __m128i, c: __m12
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of c, and store the lower 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shrdv_epi16&expand=5116)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdv_epi16&expand=5116)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshrdvw))]
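The writemask and zeromask variants above differ only in the per-lane fallback value; a scalar sketch of that selection, with illustrative helper names:

fn mask_select(k: u8, lane: u32, computed: u32, src: u32) -> u32 {
    // Writemask: lanes with a clear mask bit are copied from src.
    if (k >> lane) & 1 == 1 { computed } else { src }
}

fn maskz_select(k: u8, lane: u32, computed: u32) -> u32 {
    // Zeromask: lanes with a clear mask bit are zeroed out.
    if (k >> lane) & 1 == 1 { computed } else { 0 }
}

fn main() {
    assert_eq!(mask_select(0b0001, 1, 7, 42), 42); // bit 1 clear: keep src
    assert_eq!(maskz_select(0b0001, 1, 7), 0);     // bit 1 clear: zero
}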
@@ -1208,13 +1208,13 @@ pub unsafe fn _mm_maskz_shrdv_epi16(k: __mmask8, a: __m128i, b: __m128i, c: __m1
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shldi_epi64&expand=5060)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldi_epi64&expand=5060)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shldi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
transmute(vpshldvq(
a.as_i64x8(),
@@ -1225,7 +1225,7 @@ pub unsafe fn _mm512_shldi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shldi_epi64&expand=5058)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldi_epi64&expand=5058)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
@@ -1236,7 +1236,7 @@ pub unsafe fn _mm512_mask_shldi_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x8 = vpshldvq(
a.as_i64x8(),
@@ -1248,7 +1248,7 @@ pub unsafe fn _mm512_mask_shldi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shldi_epi64&expand=5059)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldi_epi64&expand=5059)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
@@ -1258,7 +1258,7 @@ pub unsafe fn _mm512_maskz_shldi_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x8 = vpshldvq(
a.as_i64x8(),
@@ -1271,13 +1271,13 @@ pub unsafe fn _mm512_maskz_shldi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shldi_epi64&expand=5057)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldi_epi64&expand=5057)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shldi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
transmute(vpshldvq256(
a.as_i64x4(),
@@ -1288,7 +1288,7 @@ pub unsafe fn _mm256_shldi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shldi_epi64&expand=5055)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldi_epi64&expand=5055)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
@@ -1299,7 +1299,7 @@ pub unsafe fn _mm256_mask_shldi_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x4 = vpshldvq256(
a.as_i64x4(),
@@ -1311,7 +1311,7 @@ pub unsafe fn _mm256_mask_shldi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shldi_epi64&expand=5056)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldi_epi64&expand=5056)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
@@ -1321,7 +1321,7 @@ pub unsafe fn _mm256_maskz_shldi_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x4 = vpshldvq256(
a.as_i64x4(),
@@ -1334,13 +1334,13 @@ pub unsafe fn _mm256_maskz_shldi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shldi_epi64&expand=5054)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldi_epi64&expand=5054)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_shldi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
transmute(vpshldvq128(
a.as_i64x2(),
@@ -1351,7 +1351,7 @@ pub unsafe fn _mm_shldi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shldi_epi64&expand=5052)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldi_epi64&expand=5052)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
@@ -1362,7 +1362,7 @@ pub unsafe fn _mm_mask_shldi_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x2 = vpshldvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2());
transmute(simd_select_bitmask(k, shf, src.as_i64x2()))
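In the shldi family the count is an 8-bit immediate rather than a vector lane; a scalar sketch of one 64-bit lane, again assuming the count is reduced modulo the lane width (helper name illustrative):

fn shldi64_lane(a: u64, b: u64, imm8: u32) -> u64 {
    // Concatenate a (high half) and b (low half) into a 128-bit value,
    // shift left by the immediate, and keep the high 64 bits.
    let wide = ((a as u128) << 64) | (b as u128);
    ((wide << (imm8 & 63)) >> 64) as u64
}

fn main() {
    // a = 1, b = 0, shift by 8: the high half becomes 1 << 8.
    assert_eq!(shldi64_lane(1, 0, 8), 256);
}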
@@ -1370,7 +1370,7 @@ pub unsafe fn _mm_mask_shldi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in a and b producing an intermediate 128-bit result. Shift the result left by imm8 bits, and store the upper 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shldi_epi64&expand=5053)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldi_epi64&expand=5053)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))]
@@ -1380,7 +1380,7 @@ pub unsafe fn _mm_maskz_shldi_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x2 = vpshldvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2());
let zero = _mm_setzero_si128().as_i64x2();
@@ -1389,13 +1389,13 @@ pub unsafe fn _mm_maskz_shldi_epi64<const IMM8: i32>(
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shldi_epi32&expand=5051)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldi_epi32&expand=5051)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shldi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vpshldvd(
a.as_i32x16(),
b.as_i32x16(),
@@ -1405,7 +1405,7 @@ pub unsafe fn _mm512_shldi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shldi_epi32&expand=5049)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldi_epi32&expand=5049)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
@@ -1416,7 +1416,7 @@ pub unsafe fn _mm512_mask_shldi_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x16 = vpshldvd(
a.as_i32x16(),
b.as_i32x16(),
@@ -1427,7 +1427,7 @@ pub unsafe fn _mm512_mask_shldi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shldi_epi32&expand=5050)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldi_epi32&expand=5050)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
@@ -1437,7 +1437,7 @@ pub unsafe fn _mm512_maskz_shldi_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x16 = vpshldvd(
a.as_i32x16(),
b.as_i32x16(),
@@ -1449,13 +1449,13 @@ pub unsafe fn _mm512_maskz_shldi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shldi_epi32&expand=5048)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldi_epi32&expand=5048)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shldi_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vpshldvd256(
a.as_i32x8(),
b.as_i32x8(),
@@ -1465,7 +1465,7 @@ pub unsafe fn _mm256_shldi_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shldi_epi32&expand=5046)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldi_epi32&expand=5046)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
@@ -1476,7 +1476,7 @@ pub unsafe fn _mm256_mask_shldi_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x8 = vpshldvd256(
a.as_i32x8(),
b.as_i32x8(),
@@ -1487,7 +1487,7 @@ pub unsafe fn _mm256_mask_shldi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shldi_epi32&expand=5047)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldi_epi32&expand=5047)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
@@ -1497,7 +1497,7 @@ pub unsafe fn _mm256_maskz_shldi_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x8 = vpshldvd256(
a.as_i32x8(),
b.as_i32x8(),
@@ -1509,13 +1509,13 @@ pub unsafe fn _mm256_maskz_shldi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shldi_epi32&expand=5045)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldi_epi32&expand=5045)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_shldi_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vpshldvd128(
a.as_i32x4(),
b.as_i32x4(),
@@ -1525,7 +1525,7 @@ pub unsafe fn _mm_shldi_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shldi_epi32&expand=5043)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldi_epi32&expand=5043)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
@@ -1536,14 +1536,14 @@ pub unsafe fn _mm_mask_shldi_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x4 = vpshldvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4());
transmute(simd_select_bitmask(k, shf, src.as_i32x4()))
}
/// Concatenate packed 32-bit integers in a and b producing an intermediate 64-bit result. Shift the result left by imm8 bits, and store the upper 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shldi_epi32&expand=5044)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldi_epi32&expand=5044)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))]
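The recurring macro change in these hunks, static_assert_imm8!(IMM8) to static_assert_uimm_bits!(IMM8, 8), spells out the accepted range. A rough const-fn equivalent of the check the new form performs; this helper is illustrative, not stdarch's actual macro expansion:

const fn assert_uimm_bits(imm: i32, bits: u32) {
    // Accept only values representable in `bits` unsigned bits; with
    // bits = 8 this is exactly 0..=255.
    assert!(bits < 64);
    assert!(imm >= 0 && (imm as u64) < (1u64 << bits));
}

const _: () = assert_uimm_bits(5, 8); // compiles: 5 fits in 8 bits
// const _: () = assert_uimm_bits(256, 8); // would fail at compile time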
@@ -1553,7 +1553,7 @@ pub unsafe fn _mm_maskz_shldi_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x4 = vpshldvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4());
let zero = _mm_setzero_si128().as_i32x4();
transmute(simd_select_bitmask(k, shf, zero))
@@ -1561,13 +1561,13 @@ pub unsafe fn _mm_maskz_shldi_epi32<const IMM8: i32>(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shldi_epi16&expand=5042)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shldi_epi16&expand=5042)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shldi_epi16<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
transmute(vpshldvw(
a.as_i16x32(),
@@ -1578,7 +1578,7 @@ pub unsafe fn _mm512_shldi_epi16<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shldi_epi16&expand=5040)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shldi_epi16&expand=5040)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
@@ -1589,7 +1589,7 @@ pub unsafe fn _mm512_mask_shldi_epi16<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x32 = vpshldvw(
a.as_i16x32(),
@@ -1601,7 +1601,7 @@ pub unsafe fn _mm512_mask_shldi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shldi_epi16&expand=5041)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shldi_epi16&expand=5041)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
@@ -1611,7 +1611,7 @@ pub unsafe fn _mm512_maskz_shldi_epi16<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x32 = vpshldvw(
a.as_i16x32(),
@@ -1624,13 +1624,13 @@ pub unsafe fn _mm512_maskz_shldi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shldi_epi16&expand=5039)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shldi_epi16&expand=5039)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shldi_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
transmute(vpshldvw256(
a.as_i16x16(),
@@ -1641,7 +1641,7 @@ pub unsafe fn _mm256_shldi_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shldi_epi16&expand=5037)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shldi_epi16&expand=5037)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
@@ -1652,7 +1652,7 @@ pub unsafe fn _mm256_mask_shldi_epi16<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x16 = vpshldvw256(
a.as_i16x16(),
@@ -1664,7 +1664,7 @@ pub unsafe fn _mm256_mask_shldi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shldi_epi16&expand=5038)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shldi_epi16&expand=5038)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
@@ -1674,7 +1674,7 @@ pub unsafe fn _mm256_maskz_shldi_epi16<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x16 = vpshldvw256(
a.as_i16x16(),
@@ -1687,13 +1687,13 @@ pub unsafe fn _mm256_maskz_shldi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shldi_epi16&expand=5036)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shldi_epi16&expand=5036)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_shldi_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
transmute(vpshldvw128(
a.as_i16x8(),
@@ -1704,7 +1704,7 @@ pub unsafe fn _mm_shldi_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shldi_epi16&expand=5034)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shldi_epi16&expand=5034)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
@@ -1715,7 +1715,7 @@ pub unsafe fn _mm_mask_shldi_epi16<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x8 = vpshldvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8());
transmute(simd_select_bitmask(k, shf, src.as_i16x8()))
@@ -1723,7 +1723,7 @@ pub unsafe fn _mm_mask_shldi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in a and b producing an intermediate 32-bit result. Shift the result left by imm8 bits, and store the upper 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shldi_epi16&expand=5035)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shldi_epi16&expand=5035)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))]
@@ -1733,7 +1733,7 @@ pub unsafe fn _mm_maskz_shldi_epi16<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x8 = vpshldvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8());
let zero = _mm_setzero_si128().as_i16x8();
@@ -1742,13 +1742,13 @@ pub unsafe fn _mm_maskz_shldi_epi16<const IMM8: i32>(
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shrdi_epi64&expand=5114)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdi_epi64&expand=5114)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shrdi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
transmute(vpshrdvq(
a.as_i64x8(),
@@ -1759,7 +1759,7 @@ pub unsafe fn _mm512_shrdi_epi64<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shrdi_epi64&expand=5112)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdi_epi64&expand=5112)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
@@ -1770,7 +1770,7 @@ pub unsafe fn _mm512_mask_shrdi_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x8 = vpshrdvq(
a.as_i64x8(),
@@ -1782,7 +1782,7 @@ pub unsafe fn _mm512_mask_shrdi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shrdi_epi64&expand=5113)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdi_epi64&expand=5113)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 255))] //should be vpshrdq
@@ -1792,7 +1792,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi64<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x8 = vpshrdvq(
a.as_i64x8(),
@@ -1805,13 +1805,13 @@ pub unsafe fn _mm512_maskz_shrdi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shrdi_epi64&expand=5111)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdi_epi64&expand=5111)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shrdi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
transmute(vpshrdvq256(
a.as_i64x4(),
@@ -1822,7 +1822,7 @@ pub unsafe fn _mm256_shrdi_epi64<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shrdi_epi64&expand=5109)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdi_epi64&expand=5109)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
@@ -1833,7 +1833,7 @@ pub unsafe fn _mm256_mask_shrdi_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x4 = vpshrdvq256(
a.as_i64x4(),
@@ -1845,7 +1845,7 @@ pub unsafe fn _mm256_mask_shrdi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shrdi_epi64&expand=5110)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdi_epi64&expand=5110)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
@@ -1855,7 +1855,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi64<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x4 = vpshrdvq256(
a.as_i64x4(),
@@ -1868,13 +1868,13 @@ pub unsafe fn _mm256_maskz_shrdi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shrdi_epi64&expand=5108)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdi_epi64&expand=5108)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_shrdi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
transmute(vpshrdvq128(
a.as_i64x2(),
@@ -1885,7 +1885,7 @@ pub unsafe fn _mm_shrdi_epi64<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shrdi_epi64&expand=5106)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdi_epi64&expand=5106)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
@@ -1896,7 +1896,7 @@ pub unsafe fn _mm_mask_shrdi_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x2 = vpshrdvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2());
transmute(simd_select_bitmask(k, shf, src.as_i64x2()))
@@ -1904,7 +1904,7 @@ pub unsafe fn _mm_mask_shrdi_epi64<const IMM8: i32>(
/// Concatenate packed 64-bit integers in b and a producing an intermediate 128-bit result. Shift the result right by imm8 bits, and store the lower 64-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shrdi_epi64&expand=5107)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdi_epi64&expand=5107)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldq, IMM8 = 5))] //should be vpshrdq
@@ -1914,7 +1914,7 @@ pub unsafe fn _mm_maskz_shrdi_epi64<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i64;
let shf: i64x2 = vpshrdvq128(a.as_i64x2(), b.as_i64x2(), _mm_set1_epi64x(imm8).as_i64x2());
let zero = _mm_setzero_si128().as_i64x2();
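As the bodies above show, the immediate shrdi forms are lowered onto the variable-shift intrinsics with the immediate broadcast to every lane (for example via _mm_set1_epi64x(imm8)). A scalar sketch of one 64-bit lane of the right-shift direction, under the same modulo-width assumption:

fn shrdi64_lane(a: u64, b: u64, imm8: u32) -> u64 {
    // Concatenate b (high half) and a (low half) into a 128-bit value,
    // shift right by the immediate, and keep the low 64 bits.
    let wide = ((b as u128) << 64) | (a as u128);
    (wide >> (imm8 & 63)) as u64
}

fn main() {
    // b = 1, a = 0, shift by 8: bit 64 moves down to bit 56.
    assert_eq!(shrdi64_lane(0, 1, 8), 1u64 << 56);
}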
@@ -1923,13 +1923,13 @@ pub unsafe fn _mm_maskz_shrdi_epi64<const IMM8: i32>(
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shrdi_epi32&expand=5105)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdi_epi32&expand=5105)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shrdi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vpshrdvd(
a.as_i32x16(),
b.as_i32x16(),
@@ -1939,7 +1939,7 @@ pub unsafe fn _mm512_shrdi_epi32<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shrdi_epi32&expand=5103)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdi_epi32&expand=5103)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
@@ -1950,7 +1950,7 @@ pub unsafe fn _mm512_mask_shrdi_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x16 = vpshrdvd(
a.as_i32x16(),
b.as_i32x16(),
@@ -1961,7 +1961,7 @@ pub unsafe fn _mm512_mask_shrdi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shrdi_epi32&expand=5104)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdi_epi32&expand=5104)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
@@ -1971,7 +1971,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi32<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x16 = vpshrdvd(
a.as_i32x16(),
b.as_i32x16(),
@@ -1983,13 +1983,13 @@ pub unsafe fn _mm512_maskz_shrdi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shrdi_epi32&expand=5102)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdi_epi32&expand=5102)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shrdi_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vpshrdvd256(
a.as_i32x8(),
b.as_i32x8(),
@@ -1999,7 +1999,7 @@ pub unsafe fn _mm256_shrdi_epi32<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shrdi_epi32&expand=5100)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdi_epi32&expand=5100)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
@@ -2010,7 +2010,7 @@ pub unsafe fn _mm256_mask_shrdi_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x8 = vpshrdvd256(
a.as_i32x8(),
b.as_i32x8(),
@@ -2021,7 +2021,7 @@ pub unsafe fn _mm256_mask_shrdi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shrdi_epi32&expand=5101)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdi_epi32&expand=5101)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
@@ -2031,7 +2031,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi32<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x8 = vpshrdvd256(
a.as_i32x8(),
b.as_i32x8(),
@@ -2043,13 +2043,13 @@ pub unsafe fn _mm256_maskz_shrdi_epi32<const IMM8: i32>(
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shrdi_epi32&expand=5099)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdi_epi32&expand=5099)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_shrdi_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(vpshrdvd128(
a.as_i32x4(),
b.as_i32x4(),
@@ -2059,7 +2059,7 @@ pub unsafe fn _mm_shrdi_epi32<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shrdi_epi32&expand=5097)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdi_epi32&expand=5097)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
@@ -2070,14 +2070,14 @@ pub unsafe fn _mm_mask_shrdi_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x4 = vpshrdvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4());
transmute(simd_select_bitmask(k, shf, src.as_i32x4()))
}
/// Concatenate packed 32-bit integers in b and a producing an intermediate 64-bit result. Shift the result right by imm8 bits, and store the lower 32-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shrdi_epi32&expand=5098)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdi_epi32&expand=5098)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldd, IMM8 = 5))] //should be vpshrdd
@@ -2087,7 +2087,7 @@ pub unsafe fn _mm_maskz_shrdi_epi32<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let shf: i32x4 = vpshrdvd128(a.as_i32x4(), b.as_i32x4(), _mm_set1_epi32(IMM8).as_i32x4());
let zero = _mm_setzero_si128().as_i32x4();
transmute(simd_select_bitmask(k, shf, zero))
@@ -2095,13 +2095,13 @@ pub unsafe fn _mm_maskz_shrdi_epi32<const IMM8: i32>(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shrdi_epi16&expand=5096)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_shrdi_epi16&expand=5096)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_shrdi_epi16<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
assert!(imm8 >= 0 && imm8 <= 255);
transmute(vpshrdvw(
@@ -2113,7 +2113,7 @@ pub unsafe fn _mm512_shrdi_epi16<const IMM8: i32>(a: __m512i, b: __m512i) -> __m
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shrdi_epi16&expand=5094)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_shrdi_epi16&expand=5094)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
@@ -2124,7 +2124,7 @@ pub unsafe fn _mm512_mask_shrdi_epi16<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
assert!(imm8 >= 0 && imm8 <= 255);
let shf: i16x32 = vpshrdvw(
@@ -2137,7 +2137,7 @@ pub unsafe fn _mm512_mask_shrdi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shrdi_epi16&expand=5095)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_shrdi_epi16&expand=5095)
#[inline]
#[target_feature(enable = "avx512vbmi2")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
@@ -2147,7 +2147,7 @@ pub unsafe fn _mm512_maskz_shrdi_epi16<const IMM8: i32>(
a: __m512i,
b: __m512i,
) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
assert!(imm8 >= 0 && imm8 <= 255);
let shf: i16x32 = vpshrdvw(
@@ -2161,13 +2161,13 @@ pub unsafe fn _mm512_maskz_shrdi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shrdi_epi16&expand=5093)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_shrdi_epi16&expand=5093)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_shrdi_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
assert!(imm8 >= 0 && imm8 <= 255);
transmute(vpshrdvw256(
@@ -2179,7 +2179,7 @@ pub unsafe fn _mm256_shrdi_epi16<const IMM8: i32>(a: __m256i, b: __m256i) -> __m
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shrdi_epi16&expand=5091)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_shrdi_epi16&expand=5091)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
@@ -2190,7 +2190,7 @@ pub unsafe fn _mm256_mask_shrdi_epi16<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
assert!(imm8 >= 0 && imm8 <= 255);
let shf: i16x16 = vpshrdvw256(
@@ -2203,7 +2203,7 @@ pub unsafe fn _mm256_mask_shrdi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shrdi_epi16&expand=5092)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_shrdi_epi16&expand=5092)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
@@ -2213,7 +2213,7 @@ pub unsafe fn _mm256_maskz_shrdi_epi16<const IMM8: i32>(
a: __m256i,
b: __m256i,
) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x16 = vpshrdvw256(
a.as_i16x16(),
@@ -2226,13 +2226,13 @@ pub unsafe fn _mm256_maskz_shrdi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shrdi_epi16&expand=5090)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shrdi_epi16&expand=5090)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_shrdi_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
transmute(vpshrdvw128(
a.as_i16x8(),
@@ -2243,7 +2243,7 @@ pub unsafe fn _mm_shrdi_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shrdi_epi16&expand=5088)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_shrdi_epi16&expand=5088)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
@@ -2254,7 +2254,7 @@ pub unsafe fn _mm_mask_shrdi_epi16<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x8 = vpshrdvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8());
transmute(simd_select_bitmask(k, shf, src.as_i16x8()))
@@ -2262,7 +2262,7 @@ pub unsafe fn _mm_mask_shrdi_epi16<const IMM8: i32>(
/// Concatenate packed 16-bit integers in b and a producing an intermediate 32-bit result. Shift the result right by imm8 bits, and store the lower 16-bits in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shrdi_epi16&expand=5089)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_shrdi_epi16&expand=5089)
#[inline]
#[target_feature(enable = "avx512vbmi2,avx512vl")]
#[cfg_attr(test, assert_instr(vpshldw, IMM8 = 5))] //should be vpshrdw
@@ -2272,7 +2272,7 @@ pub unsafe fn _mm_maskz_shrdi_epi16<const IMM8: i32>(
a: __m128i,
b: __m128i,
) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let imm8 = IMM8 as i16;
let shf: i16x8 = vpshrdvw128(a.as_i16x8(), b.as_i16x8(), _mm_set1_epi16(imm8).as_i16x8());
let zero = _mm_setzero_si128().as_i16x8();
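Per 16-bit lane, these VBMI2 funnel shifts concatenate the corresponding elements of b and a into a 32-bit value and shift right; a scalar sketch (illustrative helper, not part of the crate; per Intel's pseudocode the count is taken modulo 16):

    // One lane of _mm_shrdi_epi16: shift the 32-bit concatenation b:a
    // right and keep the low 16 bits.
    fn shrd16_lane(a: u16, b: u16, imm8: u32) -> u16 {
        let concat = ((b as u32) << 16) | (a as u32);
        (concat >> (imm8 & 15)) as u16
    }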
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vnni.rs b/library/stdarch/crates/core_arch/src/x86/avx512vnni.rs
index ff2c773ec..562c1ccb8 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512vnni.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512vnni.rs
@@ -8,7 +8,7 @@ use stdarch_test::assert_instr;
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_dpwssd_epi32&expand=2219)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpwssd_epi32&expand=2219)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
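Each 32-bit lane of vpdpwssd is a two-element 16-bit dot product accumulated into src; a minimal scalar model (hypothetical helper; the non-saturating variant wraps on overflow):

    fn dpwssd_lane(src: i32, a: [i16; 2], b: [i16; 2]) -> i32 {
        // Each i16 x i16 product fits an i32 exactly; only the
        // accumulation can wrap.
        src.wrapping_add((a[0] as i32) * (b[0] as i32))
            .wrapping_add((a[1] as i32) * (b[1] as i32))
    }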
@@ -18,7 +18,7 @@ pub unsafe fn _mm512_dpwssd_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m51
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_dpwssd_epi32&expand=2220)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpwssd_epi32&expand=2220)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -34,7 +34,7 @@ pub unsafe fn _mm512_mask_dpwssd_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_dpwssd_epi32&expand=2221)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpwssd_epi32&expand=2221)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -51,7 +51,7 @@ pub unsafe fn _mm512_maskz_dpwssd_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dpwssd_epi32&expand=2216)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpwssd_epi32&expand=2216)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -61,7 +61,7 @@ pub unsafe fn _mm256_dpwssd_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m25
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_dpwssd_epi32&expand=2217)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpwssd_epi32&expand=2217)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -77,7 +77,7 @@ pub unsafe fn _mm256_mask_dpwssd_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_dpwssd_epi32&expand=2218)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpwssd_epi32&expand=2218)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -94,7 +94,7 @@ pub unsafe fn _mm256_maskz_dpwssd_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dpwssd_epi32&expand=2213)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpwssd_epi32&expand=2213)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -104,7 +104,7 @@ pub unsafe fn _mm_dpwssd_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_dpwssd_epi32&expand=2214)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpwssd_epi32&expand=2214)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -115,7 +115,7 @@ pub unsafe fn _mm_mask_dpwssd_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_dpwssd_epi32&expand=2215)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpwssd_epi32&expand=2215)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssd))]
@@ -127,7 +127,7 @@ pub unsafe fn _mm_maskz_dpwssd_epi32(k: __mmask8, src: __m128i, a: __m128i, b: _
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_dpwssds_epi32&expand=2228)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpwssds_epi32&expand=2228)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
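The trailing `s` changes only the final accumulation to signed saturation; a scalar sketch of one lane (illustrative name; the exact dot product is computed in i64 before clamping):

    fn dpwssds_lane(src: i32, a: [i16; 2], b: [i16; 2]) -> i32 {
        let dot = (a[0] as i64) * (b[0] as i64) + (a[1] as i64) * (b[1] as i64);
        (src as i64 + dot).clamp(i32::MIN as i64, i32::MAX as i64) as i32
    }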
@@ -137,7 +137,7 @@ pub unsafe fn _mm512_dpwssds_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m5
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_dpwssds_epi32&expand=2229)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpwssds_epi32&expand=2229)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -153,7 +153,7 @@ pub unsafe fn _mm512_mask_dpwssds_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_dpwssds_epi32&expand=2230)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpwssds_epi32&expand=2230)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -170,7 +170,7 @@ pub unsafe fn _mm512_maskz_dpwssds_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dpwssds_epi32&expand=2225)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpwssds_epi32&expand=2225)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -180,7 +180,7 @@ pub unsafe fn _mm256_dpwssds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m2
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_dpwssds_epi32&expand=2226)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpwssds_epi32&expand=2226)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -196,7 +196,7 @@ pub unsafe fn _mm256_mask_dpwssds_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_dpwssds_epi32&expand=2227)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpwssds_epi32&expand=2227)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -213,7 +213,7 @@ pub unsafe fn _mm256_maskz_dpwssds_epi32(
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dpwssds_epi32&expand=2222)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpwssds_epi32&expand=2222)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -223,7 +223,7 @@ pub unsafe fn _mm_dpwssds_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_dpwssds_epi32&expand=2223)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpwssds_epi32&expand=2223)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -234,7 +234,7 @@ pub unsafe fn _mm_mask_dpwssds_epi32(src: __m128i, k: __mmask8, a: __m128i, b: _
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in a with corresponding 16-bit integers in b, producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_dpwssds_epi32&expand=2224)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpwssds_epi32&expand=2224)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpwssds))]
@@ -251,7 +251,7 @@ pub unsafe fn _mm_maskz_dpwssds_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_dpbusd_epi32&expand=2201)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpbusd_epi32&expand=2201)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
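Per 32-bit lane, vpdpbusd is a four-element mixed-sign dot product; a scalar sketch (hypothetical helper; `u8` operands are zero-extended and `i8` operands sign-extended, so each product fits an `i16` exactly):

    fn dpbusd_lane(src: i32, a: [u8; 4], b: [i8; 4]) -> i32 {
        let mut acc = src;
        for k in 0..4 {
            acc = acc.wrapping_add((a[k] as i32) * (b[k] as i32));
        }
        acc
    }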
@@ -261,7 +261,7 @@ pub unsafe fn _mm512_dpbusd_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m51
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_dpbusd_epi32&expand=2202)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpbusd_epi32&expand=2202)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -277,7 +277,7 @@ pub unsafe fn _mm512_mask_dpbusd_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_dpbusd_epi32&expand=2203)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpbusd_epi32&expand=2203)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -294,7 +294,7 @@ pub unsafe fn _mm512_maskz_dpbusd_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dpbusd_epi32&expand=2198)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpbusd_epi32&expand=2198)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -304,7 +304,7 @@ pub unsafe fn _mm256_dpbusd_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m25
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_dpbusd_epi32&expand=2199)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpbusd_epi32&expand=2199)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -320,7 +320,7 @@ pub unsafe fn _mm256_mask_dpbusd_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_dpbusd_epi32&expand=2200)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpbusd_epi32&expand=2200)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -337,7 +337,7 @@ pub unsafe fn _mm256_maskz_dpbusd_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dpbusd_epi32&expand=2195)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpbusd_epi32&expand=2195)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -347,7 +347,7 @@ pub unsafe fn _mm_dpbusd_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_dpbusd_epi32&expand=2196)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpbusd_epi32&expand=2196)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -358,7 +358,7 @@ pub unsafe fn _mm_mask_dpbusd_epi32(src: __m128i, k: __mmask8, a: __m128i, b: __
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_dpbusd_epi32&expand=2197)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpbusd_epi32&expand=2197)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusd))]
@@ -370,7 +370,7 @@ pub unsafe fn _mm_maskz_dpbusd_epi32(k: __mmask8, src: __m128i, a: __m128i, b: _
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_dpbusds_epi32&expand=2210)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_dpbusds_epi32&expand=2210)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -380,7 +380,7 @@ pub unsafe fn _mm512_dpbusds_epi32(src: __m512i, a: __m512i, b: __m512i) -> __m5
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_dpbusds_epi32&expand=2211)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_dpbusds_epi32&expand=2211)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -396,7 +396,7 @@ pub unsafe fn _mm512_mask_dpbusds_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_dpbusds_epi32&expand=2212)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_dpbusds_epi32&expand=2212)
#[inline]
#[target_feature(enable = "avx512vnni")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -413,7 +413,7 @@ pub unsafe fn _mm512_maskz_dpbusds_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dpbusds_epi32&expand=2207)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_dpbusds_epi32&expand=2207)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -423,7 +423,7 @@ pub unsafe fn _mm256_dpbusds_epi32(src: __m256i, a: __m256i, b: __m256i) -> __m2
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_dpbusds_epi32&expand=2208)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_dpbusds_epi32&expand=2208)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -439,7 +439,7 @@ pub unsafe fn _mm256_mask_dpbusds_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_dpbusds_epi32&expand=2209)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_dpbusds_epi32&expand=2209)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -456,7 +456,7 @@ pub unsafe fn _mm256_maskz_dpbusds_epi32(
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dpbusds_epi32&expand=2204)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dpbusds_epi32&expand=2204)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -466,7 +466,7 @@ pub unsafe fn _mm_dpbusds_epi32(src: __m128i, a: __m128i, b: __m128i) -> __m128i
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_dpbusds_epi32&expand=2205)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_dpbusds_epi32&expand=2205)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
@@ -477,7 +477,7 @@ pub unsafe fn _mm_mask_dpbusds_epi32(src: __m128i, k: __mmask8, a: __m128i, b: _
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in a with corresponding signed 8-bit integers in b, producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in src using signed saturation, and store the packed 32-bit results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_dpbusds_epi32&expand=2206)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_dpbusds_epi32&expand=2206)
#[inline]
#[target_feature(enable = "avx512vnni,avx512vl")]
#[cfg_attr(test, assert_instr(vpdpbusds))]
diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vpopcntdq.rs b/library/stdarch/crates/core_arch/src/x86/avx512vpopcntdq.rs
index 3b97c4c19..d196958f0 100644
--- a/library/stdarch/crates/core_arch/src/x86/avx512vpopcntdq.rs
+++ b/library/stdarch/crates/core_arch/src/x86/avx512vpopcntdq.rs
@@ -49,7 +49,7 @@ extern "C" {
/// For each packed 32-bit integer, maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq")]
#[cfg_attr(test, assert_instr(vpopcntd))]
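A hedged usage sketch with runtime dispatch (at the time of this change the AVX-512 intrinsics were still nightly-only; the fallback uses the portable `count_ones`):

    #[cfg(target_arch = "x86_64")]
    fn popcnt_u32x16(v: &mut [u32; 16]) {
        if is_x86_feature_detected!("avx512vpopcntdq") {
            unsafe {
                use core::arch::x86_64::*;
                // [u32; 16] and __m512i are both 64 bytes, so this is a
                // size-preserving reinterpretation.
                let a: __m512i = core::mem::transmute(*v);
                *v = core::mem::transmute(_mm512_popcnt_epi32(a));
            }
        } else {
            for x in v.iter_mut() {
                *x = x.count_ones();
            }
        }
    }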
@@ -62,7 +62,7 @@ pub unsafe fn _mm512_popcnt_epi32(a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -76,7 +76,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi32(k: __mmask16, a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -90,7 +90,7 @@ pub unsafe fn _mm512_mask_popcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -
/// For each packed 32-bit integer, maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -103,7 +103,7 @@ pub unsafe fn _mm256_popcnt_epi32(a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -117,7 +117,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi32(k: __mmask8, a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -131,7 +131,7 @@ pub unsafe fn _mm256_mask_popcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) ->
/// For each packed 32-bit integer, maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -144,7 +144,7 @@ pub unsafe fn _mm_popcnt_epi32(a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -158,7 +158,7 @@ pub unsafe fn _mm_maskz_popcnt_epi32(k: __mmask8, a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_popcnt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi32)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntd))]
@@ -172,7 +172,7 @@ pub unsafe fn _mm_mask_popcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __
/// For each packed 64-bit integer, maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -185,7 +185,7 @@ pub unsafe fn _mm512_popcnt_epi64(a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -199,7 +199,7 @@ pub unsafe fn _mm512_maskz_popcnt_epi64(k: __mmask8, a: __m512i) -> __m512i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -213,7 +213,7 @@ pub unsafe fn _mm512_mask_popcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) ->
/// For each packed 64-bit integer, maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -226,7 +226,7 @@ pub unsafe fn _mm256_popcnt_epi64(a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -240,7 +240,7 @@ pub unsafe fn _mm256_maskz_popcnt_epi64(k: __mmask8, a: __m256i) -> __m256i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -254,7 +254,7 @@ pub unsafe fn _mm256_mask_popcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) ->
/// For each packed 64-bit integer, maps the value to the number of logical 1 bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -267,7 +267,7 @@ pub unsafe fn _mm_popcnt_epi64(a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntq))]
@@ -281,7 +281,7 @@ pub unsafe fn _mm_maskz_popcnt_epi64(k: __mmask8, a: __m128i) -> __m128i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_popcnt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_popcnt_epi64)
#[inline]
#[target_feature(enable = "avx512vpopcntdq,avx512vl")]
#[cfg_attr(test, assert_instr(vpopcntq))]
diff --git a/library/stdarch/crates/core_arch/src/x86/bmi1.rs b/library/stdarch/crates/core_arch/src/x86/bmi1.rs
index 0f769f33b..713ebf76e 100644
--- a/library/stdarch/crates/core_arch/src/x86/bmi1.rs
+++ b/library/stdarch/crates/core_arch/src/x86/bmi1.rs
@@ -15,7 +15,7 @@ use stdarch_test::assert_instr;
/// Extracts bits in range [`start`, `start` + `len`) from `a` into
/// the least significant bits of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bextr_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bextr_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(bextr))]
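A portable model of the extraction, assuming `start` and `len` are both below 32 (the hardware also defines the out-of-range cases; the helper name is made up):

    fn bextr_u32_model(a: u32, start: u32, len: u32) -> u32 {
        (a >> start) & ((1u32 << len) - 1) // keep `len` bits from bit `start`
    }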
@@ -30,7 +30,7 @@ pub unsafe fn _bextr_u32(a: u32, start: u32, len: u32) -> u32 {
/// Bits `[7,0]` of `control` specify the index to the first bit in the range
/// to be extracted, and bits `[15,8]` specify the length of the range.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bextr2_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bextr2_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(bextr))]
@@ -41,7 +41,7 @@ pub unsafe fn _bextr2_u32(a: u32, control: u32) -> u32 {
/// Bitwise logical `AND` of inverted `a` with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_andn_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_andn_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(andn))]
@@ -52,7 +52,7 @@ pub unsafe fn _andn_u32(a: u32, b: u32) -> u32 {
/// Extracts lowest set isolated bit.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_blsi_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_blsi_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(blsi))]
@@ -63,7 +63,7 @@ pub unsafe fn _blsi_u32(x: u32) -> u32 {
/// Gets mask up to lowest set bit.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_blsmsk_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_blsmsk_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(blsmsk))]
@@ -76,7 +76,7 @@ pub unsafe fn _blsmsk_u32(x: u32) -> u32 {
///
/// If `x` is `0`, it sets CF.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_blsr_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_blsr_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(blsr))]
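The BMI1 operations above are the classic two's-complement bit tricks; portable one-liners (illustrative names):

    fn andn(a: u32, b: u32) -> u32 { !a & b }          // _andn_u32
    fn blsi(x: u32) -> u32 { x & x.wrapping_neg() }    // isolate lowest set bit
    fn blsmsk(x: u32) -> u32 { x ^ x.wrapping_sub(1) } // mask up to lowest set bit
    fn blsr(x: u32) -> u32 { x & x.wrapping_sub(1) }   // clear lowest set bit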
@@ -89,7 +89,7 @@ pub unsafe fn _blsr_u32(x: u32) -> u32 {
///
/// When the source operand is `0`, it returns its size in bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_tzcnt_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_tzcnt_u32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(tzcnt))]
@@ -102,7 +102,7 @@ pub unsafe fn _tzcnt_u32(x: u32) -> u32 {
///
/// When the source operand is `0`, it returns its size in bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_tzcnt_32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_tzcnt_32)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(tzcnt))]
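In portable Rust the same semantics are available as `trailing_zeros`, which also returns the bit width for a zero input:

    fn tzcnt_model(x: u32) -> u32 {
        x.trailing_zeros() // 32 when x == 0, matching _tzcnt_u32
    }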
diff --git a/library/stdarch/crates/core_arch/src/x86/bmi2.rs b/library/stdarch/crates/core_arch/src/x86/bmi2.rs
index b08b8733c..efe7199e9 100644
--- a/library/stdarch/crates/core_arch/src/x86/bmi2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/bmi2.rs
@@ -18,7 +18,7 @@ use stdarch_test::assert_instr;
/// Unsigned multiplication of `a` with `b` returning a pair `(lo, hi)` with
/// the low half and the high half of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mulx_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mulx_u32)
#[inline]
// LLVM BUG (should be mulxl): https://bugs.llvm.org/show_bug.cgi?id=34232
#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(imul))]
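A portable model of the widening multiply (hypothetical helper mirroring the intrinsic's shape: the high half goes through `hi`, the low half is returned):

    fn mulx_u32_model(a: u32, b: u32, hi: &mut u32) -> u32 {
        let wide = (a as u64) * (b as u64);
        *hi = (wide >> 32) as u32;
        wide as u32
    }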
@@ -33,7 +33,7 @@ pub unsafe fn _mulx_u32(a: u32, b: u32, hi: &mut u32) -> u32 {
/// Zeroes the bits of `a` at positions greater than or equal to `index`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bzhi_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bzhi_u32)
#[inline]
#[target_feature(enable = "bmi2")]
#[cfg_attr(test, assert_instr(bzhi))]
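A scalar sketch (illustrative; per Intel's pseudocode an index of 32 or more leaves `a` unchanged):

    fn bzhi_u32_model(a: u32, index: u32) -> u32 {
        if index >= 32 { a } else { a & ((1u32 << index) - 1) }
    }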
@@ -45,7 +45,7 @@ pub unsafe fn _bzhi_u32(a: u32, index: u32) -> u32 {
/// Scatters contiguous low order bits of `a` to the result at the positions
/// specified by the `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_pdep_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_pdep_u32)
#[inline]
#[target_feature(enable = "bmi2")]
#[cfg_attr(test, assert_instr(pdep))]
@@ -57,7 +57,7 @@ pub unsafe fn _pdep_u32(a: u32, mask: u32) -> u32 {
/// Gathers the bits of `x` specified by the `mask` into the contiguous low
/// order bit positions of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_pext_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_pext_u32)
#[inline]
#[target_feature(enable = "bmi2")]
#[cfg_attr(test, assert_instr(pext))]
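Bit-by-bit software models of the two permutations (hypothetical helpers that walk the set bits of `mask`):

    fn pdep_u32_model(mut a: u32, mut mask: u32) -> u32 {
        let mut out = 0;
        while mask != 0 {
            let bit = mask & mask.wrapping_neg(); // lowest set bit of mask
            if a & 1 != 0 {
                out |= bit;
            }
            a >>= 1;
            mask &= mask - 1; // clear that bit
        }
        out
    }

    fn pext_u32_model(a: u32, mut mask: u32) -> u32 {
        let (mut out, mut i) = (0u32, 0u32);
        while mask != 0 {
            let bit = mask & mask.wrapping_neg();
            if a & bit != 0 {
                out |= 1 << i;
            }
            i += 1;
            mask &= mask - 1;
        }
        out
    }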
diff --git a/library/stdarch/crates/core_arch/src/x86/bswap.rs b/library/stdarch/crates/core_arch/src/x86/bswap.rs
index fcaad26fb..0db9acbd0 100644
--- a/library/stdarch/crates/core_arch/src/x86/bswap.rs
+++ b/library/stdarch/crates/core_arch/src/x86/bswap.rs
@@ -6,7 +6,7 @@ use stdarch_test::assert_instr;
/// Returns an integer with the reversed byte order of `x`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bswap)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bswap)
#[inline]
#[cfg_attr(test, assert_instr(bswap))]
#[stable(feature = "simd_x86", since = "1.27.0")]
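Equivalent to the portable `swap_bytes`:

    fn bswap_model(x: i32) -> i32 {
        x.swap_bytes() // e.g. 0x11223344 -> 0x44332211
    }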
diff --git a/library/stdarch/crates/core_arch/src/x86/cpuid.rs b/library/stdarch/crates/core_arch/src/x86/cpuid.rs
index 2624e8bdf..3bfb35300 100644
--- a/library/stdarch/crates/core_arch/src/x86/cpuid.rs
+++ b/library/stdarch/crates/core_arch/src/x86/cpuid.rs
@@ -46,7 +46,7 @@ pub struct CpuidResult {
/// System Instructions][amd64_ref].
///
/// [wiki_cpuid]: https://en.wikipedia.org/wiki/CPUID
-/// [intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
+/// [intel64_ref]: https://cdrdv2-public.intel.com/671110/325383-sdm-vol-2abcd.pdf
/// [amd64_ref]: http://support.amd.com/TechDocs/24594.pdf
#[inline]
#[cfg_attr(test, assert_instr(cpuid))]
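A usage sketch reading the vendor string from leaf 0 (the 12 bytes land in EBX, EDX, ECX, in that order; CPUID is available on every x86_64 CPU, so the call is sound there):

    #[cfg(target_arch = "x86_64")]
    fn vendor_id() -> [u8; 12] {
        use core::arch::x86_64::__cpuid;
        let r = unsafe { __cpuid(0) };
        let mut v = [0u8; 12];
        v[0..4].copy_from_slice(&r.ebx.to_le_bytes());
        v[4..8].copy_from_slice(&r.edx.to_le_bytes());
        v[8..12].copy_from_slice(&r.ecx.to_le_bytes());
        v // e.g. b"GenuineIntel" or b"AuthenticAMD"
    }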
diff --git a/library/stdarch/crates/core_arch/src/x86/eflags.rs b/library/stdarch/crates/core_arch/src/x86/eflags.rs
index e9ebdf22b..287894bad 100644
--- a/library/stdarch/crates/core_arch/src/x86/eflags.rs
+++ b/library/stdarch/crates/core_arch/src/x86/eflags.rs
@@ -4,7 +4,7 @@ use crate::arch::asm;
/// Reads EFLAGS.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=__readeflags)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=__readeflags)
#[cfg(target_arch = "x86")]
#[inline(always)]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -21,7 +21,7 @@ pub unsafe fn __readeflags() -> u32 {
/// Reads EFLAGS.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=__readeflags)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=__readeflags)
#[cfg(target_arch = "x86_64")]
#[inline(always)]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -38,7 +38,7 @@ pub unsafe fn __readeflags() -> u64 {
/// Writes EFLAGS.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=__writeeflags)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=__writeeflags)
#[cfg(target_arch = "x86")]
#[inline(always)]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -53,7 +53,7 @@ pub unsafe fn __writeeflags(eflags: u32) {
/// Writes EFLAGS.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=__writeeflags)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=__writeeflags)
#[cfg(target_arch = "x86_64")]
#[inline(always)]
#[stable(feature = "simd_x86", since = "1.27.0")]
diff --git a/library/stdarch/crates/core_arch/src/x86/f16c.rs b/library/stdarch/crates/core_arch/src/x86/f16c.rs
index 8b25fd65e..88cc78ff6 100644
--- a/library/stdarch/crates/core_arch/src/x86/f16c.rs
+++ b/library/stdarch/crates/core_arch/src/x86/f16c.rs
@@ -1,6 +1,6 @@
//! [F16C intrinsics].
//!
-//! [F16C intrinsics]: https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=fp16&expand=1769
+//! [F16C intrinsics]: https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=fp16&expand=1769
use crate::{
core_arch::{simd::*, x86::*},
@@ -29,6 +29,7 @@ extern "unadjusted" {
#[inline]
#[target_feature(enable = "f16c")]
#[cfg_attr(test, assert_instr("vcvtph2ps"))]
+#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
pub unsafe fn _mm_cvtph_ps(a: __m128i) -> __m128 {
transmute(llvm_vcvtph2ps_128(transmute(a)))
}
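A round-trip sketch using the two conversions stabilized here (`_MM_FROUND_TO_NEAREST_INT` is 0, so it satisfies the 3-bit immediate check on `_mm_cvtps_ph` below):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "f16c")]
    unsafe fn f16_roundtrip(v: core::arch::x86_64::__m128) -> core::arch::x86_64::__m128 {
        use core::arch::x86_64::*;
        let half = _mm_cvtps_ph::<_MM_FROUND_TO_NEAREST_INT>(v); // 4 x f32 -> 4 x f16
        _mm_cvtph_ps(half)                                       // 4 x f16 -> 4 x f32
    }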
@@ -38,6 +39,7 @@ pub unsafe fn _mm_cvtph_ps(a: __m128i) -> __m128 {
#[inline]
#[target_feature(enable = "f16c")]
#[cfg_attr(test, assert_instr("vcvtph2ps"))]
+#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
pub unsafe fn _mm256_cvtph_ps(a: __m128i) -> __m256 {
transmute(llvm_vcvtph2ps_256(transmute(a)))
}
@@ -57,8 +59,9 @@ pub unsafe fn _mm256_cvtph_ps(a: __m128i) -> __m256 {
#[target_feature(enable = "f16c")]
#[cfg_attr(test, assert_instr("vcvtps2ph", IMM_ROUNDING = 0))]
#[rustc_legacy_const_generics(1)]
+#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
pub unsafe fn _mm_cvtps_ph<const IMM_ROUNDING: i32>(a: __m128) -> __m128i {
- static_assert_imm3!(IMM_ROUNDING);
+ static_assert_uimm_bits!(IMM_ROUNDING, 3);
let a = a.as_f32x4();
let r = llvm_vcvtps2ph_128(a, IMM_ROUNDING);
transmute(r)
@@ -78,8 +81,9 @@ pub unsafe fn _mm_cvtps_ph<const IMM_ROUNDING: i32>(a: __m128) -> __m128i {
#[target_feature(enable = "f16c")]
#[cfg_attr(test, assert_instr("vcvtps2ph", IMM_ROUNDING = 0))]
#[rustc_legacy_const_generics(1)]
+#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
pub unsafe fn _mm256_cvtps_ph<const IMM_ROUNDING: i32>(a: __m256) -> __m128i {
- static_assert_imm3!(IMM_ROUNDING);
+ static_assert_uimm_bits!(IMM_ROUNDING, 3);
let a = a.as_f32x8();
let r = llvm_vcvtps2ph_256(a, IMM_ROUNDING);
transmute(r)
diff --git a/library/stdarch/crates/core_arch/src/x86/fma.rs b/library/stdarch/crates/core_arch/src/x86/fma.rs
index 476f4538c..f72c18c07 100644
--- a/library/stdarch/crates/core_arch/src/x86/fma.rs
+++ b/library/stdarch/crates/core_arch/src/x86/fma.rs
@@ -27,7 +27,7 @@ use stdarch_test::assert_instr;
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`, and adds the intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmadd_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmadd))]
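Per lane, FMA has the same single-rounding contract as the portable `mul_add`:

    fn fmadd_lane(a: f64, b: f64, c: f64) -> f64 {
        a.mul_add(b, c) // a * b + c, rounded once
    }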
@@ -39,7 +39,7 @@ pub unsafe fn _mm_fmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`, and adds the intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmadd_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmadd))]
@@ -51,7 +51,7 @@ pub unsafe fn _mm256_fmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and adds the intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmadd_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmadd))]
@@ -63,7 +63,7 @@ pub unsafe fn _mm_fmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and adds the intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmadd_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmadd))]
@@ -77,7 +77,7 @@ pub unsafe fn _mm256_fmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// Stores the result in the lower element of the returned value, and copies the
/// upper element from `a` to the upper elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmadd_sd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmadd))]
@@ -91,7 +91,7 @@ pub unsafe fn _mm_fmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// Stores the result in the lower element of the returned value, and copies the
/// 3 upper elements from `a` to the upper elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmadd_ss)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmadd))]
@@ -104,7 +104,7 @@ pub unsafe fn _mm_fmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 {
/// and `b`, and alternately adds and subtracts packed elements in `c` to/from
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmaddsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmaddsub_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
@@ -117,7 +117,7 @@ pub unsafe fn _mm_fmaddsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// and `b`, and alternately adds and subtracts packed elements in `c` to/from
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmaddsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmaddsub_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
@@ -130,7 +130,7 @@ pub unsafe fn _mm256_fmaddsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d
/// and `b`, and alternately adds and subtracts packed elements in `c` to/from
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmaddsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmaddsub_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
@@ -143,7 +143,7 @@ pub unsafe fn _mm_fmaddsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 {
/// and `b`, and alternately adds and subtracts packed elements in `c` to/from
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmaddsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmaddsub_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmaddsub))]
@@ -155,7 +155,7 @@ pub unsafe fn _mm256_fmaddsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`, and subtracts packed elements in `c` from the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmsub_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsub))]
@@ -167,7 +167,7 @@ pub unsafe fn _mm_fmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`, and subtracts packed elements in `c` from the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmsub_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsub))]
@@ -179,7 +179,7 @@ pub unsafe fn _mm256_fmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and subtracts packed elements in `c` from the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmsub_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsub213ps))]
@@ -191,7 +191,7 @@ pub unsafe fn _mm_fmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and subtracts packed elements in `c` from the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmsub_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsub213ps))]
@@ -205,7 +205,7 @@ pub unsafe fn _mm256_fmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// result. Stores the result in the lower element of the returned value, and
/// copies the upper element from `a` to the upper elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmsub_sd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsub))]
@@ -219,7 +219,7 @@ pub unsafe fn _mm_fmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// result. Stores the result in the lower element of the returned value, and
/// copies the 3 upper elements from `a` to the upper elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmsub_ss)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsub))]
@@ -232,7 +232,7 @@ pub unsafe fn _mm_fmsub_ss(a: __m128, b: __m128, c: __m128) -> __m128 {
/// and `b`, and alternately subtracts and adds packed elements in `c` from/to
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsubadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmsubadd_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
@@ -245,7 +245,7 @@ pub unsafe fn _mm_fmsubadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// and `b`, and alternately subtracts and adds packed elements in `c` from/to
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsubadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmsubadd_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
@@ -258,7 +258,7 @@ pub unsafe fn _mm256_fmsubadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d
/// and `b`, and alternately subtracts and adds packed elements in `c` from/to
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsubadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fmsubadd_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
@@ -271,7 +271,7 @@ pub unsafe fn _mm_fmsubadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 {
/// and `b`, and alternately subtracts and adds packed elements in `c` from/to
/// the intermediate result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsubadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fmsubadd_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfmsubadd))]
@@ -283,7 +283,7 @@ pub unsafe fn _mm256_fmsubadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`, and adds the negated intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmadd_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmadd))]
@@ -295,7 +295,7 @@ pub unsafe fn _mm_fnmadd_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`, and adds the negated intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fnmadd_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmadd))]
@@ -307,7 +307,7 @@ pub unsafe fn _mm256_fnmadd_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and adds the negated intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmadd_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmadd))]
@@ -319,7 +319,7 @@ pub unsafe fn _mm_fnmadd_ps(a: __m128, b: __m128, c: __m128) -> __m128 {
/// Multiplies packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and adds the negated intermediate result to packed elements in `c`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fnmadd_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmadd))]
@@ -333,7 +333,7 @@ pub unsafe fn _mm256_fnmadd_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// in `c`. Stores the result in the lower element of the returned value, and
/// copies the upper element from `a` to the upper elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmadd_sd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmadd))]
@@ -347,7 +347,7 @@ pub unsafe fn _mm_fnmadd_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// in `c`. Stores the result in the lower element of the returned value, and
/// copies the 3 upper elements from `a` to the upper elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmadd_ss)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmadd))]
@@ -360,7 +360,7 @@ pub unsafe fn _mm_fnmadd_ss(a: __m128, b: __m128, c: __m128) -> __m128 {
/// and `b`, and subtracts packed elements in `c` from the negated intermediate
/// result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmsub_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmsub))]
@@ -373,7 +373,7 @@ pub unsafe fn _mm_fnmsub_pd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// and `b`, and subtracts packed elements in `c` from the negated intermediate
/// result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fnmsub_pd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmsub))]
@@ -386,7 +386,7 @@ pub unsafe fn _mm256_fnmsub_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
/// and `b`, and subtracts packed elements in `c` from the negated intermediate
/// result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmsub_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmsub))]
@@ -399,7 +399,7 @@ pub unsafe fn _mm_fnmsub_ps(a: __m128, b: __m128, c: __m128) -> __m128 {
/// and `b`, and subtracts packed elements in `c` from the negated intermediate
/// result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_fnmsub_ps)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmsub))]
@@ -414,7 +414,7 @@ pub unsafe fn _mm256_fnmsub_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
/// value, and copies the upper element from `a` to the upper elements of the
/// result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmsub_sd)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmsub))]
@@ -429,7 +429,7 @@ pub unsafe fn _mm_fnmsub_sd(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
/// returned value, and copies the 3 upper elements from `a` to the upper
/// elements of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_fnmsub_ss)
#[inline]
#[target_feature(enable = "fma")]
#[cfg_attr(test, assert_instr(vfnmsub))]
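
To make the fused-multiply-add semantics concrete, a usage sketch that is not part of this diff: each lane of `_mm_fmadd_ps` computes `a * b + c` in one step, with a single rounding. The runtime feature check is the usual detect-then-call pattern.

#[cfg(target_arch = "x86_64")]
fn fma_demo() {
    if std::is_x86_feature_detected!("fma") {
        use std::arch::x86_64::*;
        unsafe {
            // _mm_set_ps takes lanes high-to-low, so lane 0 holds 1.0.
            let a = _mm_set_ps(4.0, 3.0, 2.0, 1.0);
            let b = _mm_set_ps(10.0, 10.0, 10.0, 10.0);
            let c = _mm_set_ps(0.5, 0.5, 0.5, 0.5);
            let r = _mm_fmadd_ps(a, b, c); // per lane: a * b + c
            let mut out = [0.0f32; 4];
            _mm_storeu_ps(out.as_mut_ptr(), r);
            assert_eq!(out, [10.5, 20.5, 30.5, 40.5]);
        }
    }
}
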
diff --git a/library/stdarch/crates/core_arch/src/x86/fxsr.rs b/library/stdarch/crates/core_arch/src/x86/fxsr.rs
index 8ea1bfab7..8b9106473 100644
--- a/library/stdarch/crates/core_arch/src/x86/fxsr.rs
+++ b/library/stdarch/crates/core_arch/src/x86/fxsr.rs
@@ -22,7 +22,7 @@ extern "C" {
/// [fxsave]: http://www.felixcloutier.com/x86/FXSAVE.html
/// [fxrstor]: http://www.felixcloutier.com/x86/FXRSTOR.html
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_fxsave)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_fxsave)
#[inline]
#[target_feature(enable = "fxsr")]
#[cfg_attr(test, assert_instr(fxsave))]
@@ -46,7 +46,7 @@ pub unsafe fn _fxsave(mem_addr: *mut u8) {
/// [fxsave]: http://www.felixcloutier.com/x86/FXSAVE.html
/// [fxrstor]: http://www.felixcloutier.com/x86/FXRSTOR.html
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_fxrstor)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_fxrstor)
#[inline]
#[target_feature(enable = "fxsr")]
#[cfg_attr(test, assert_instr(fxrstor))]
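
A usage sketch for this pair, assuming the constraints from Intel's FXSAVE/FXRSTOR description: the save area is 512 bytes and must be 16-byte aligned. The `FxsaveArea` wrapper is our own, not part of the crate.

#[cfg(target_arch = "x86_64")]
fn fxsr_demo() {
    if std::is_x86_feature_detected!("fxsr") {
        use std::arch::x86_64::{_fxrstor, _fxsave};

        // 512-byte, 16-byte-aligned save area for the x87/MMX/SSE state.
        #[repr(C, align(16))]
        struct FxsaveArea([u8; 512]);

        let mut area = FxsaveArea([0; 512]);
        unsafe {
            _fxsave(area.0.as_mut_ptr()); // snapshot the FP/SIMD state
            _fxrstor(area.0.as_ptr()); // restore the same snapshot
        }
    }
}
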
diff --git a/library/stdarch/crates/core_arch/src/x86/gfni.rs b/library/stdarch/crates/core_arch/src/x86/gfni.rs
index 679b2548a..7c2195e71 100644
--- a/library/stdarch/crates/core_arch/src/x86/gfni.rs
+++ b/library/stdarch/crates/core_arch/src/x86/gfni.rs
@@ -63,7 +63,7 @@ extern "C" {
/// The field is in polynomial representation with the reduction polynomial
/// x^8 + x^4 + x^3 + x + 1.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -78,7 +78,7 @@ pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -102,7 +102,7 @@ pub unsafe fn _mm512_mask_gf2p8mul_epi8(
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -119,7 +119,7 @@ pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -
/// The field is in polynomial representation with the reduction polynomial
/// x^8 + x^4 + x^3 + x + 1.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -134,7 +134,7 @@ pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -158,7 +158,7 @@ pub unsafe fn _mm256_mask_gf2p8mul_epi8(
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -175,7 +175,7 @@ pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -
/// The field is in polynomial representation with the reduction polynomial
/// x^8 + x^4 + x^3 + x + 1.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni")]
#[cfg_attr(test, assert_instr(gf2p8mulb))]
@@ -190,7 +190,7 @@ pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -214,7 +214,7 @@ pub unsafe fn _mm_mask_gf2p8mul_epi8(
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8mul_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_gf2p8mul_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8mulb))]
@@ -232,13 +232,13 @@ pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _
/// and b being a constant 8-bit immediate value.
/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_gf2p8affine_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
@@ -254,7 +254,7 @@ pub unsafe fn _mm512_gf2p8affine_epi64_epi8<const B: i32>(x: __m512i, a: __m512i
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
@@ -264,7 +264,7 @@ pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8<const B: i32>(
x: __m512i,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let zero = _mm512_setzero_si512().as_i8x64();
let x = x.as_i8x64();
@@ -281,7 +281,7 @@ pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8<const B: i32>(
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
@@ -292,7 +292,7 @@ pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8<const B: i32>(
x: __m512i,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
@@ -305,13 +305,13 @@ pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8<const B: i32>(
/// and b being a constant 8-bit immediate value.
/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_gf2p8affine_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
@@ -327,7 +327,7 @@ pub unsafe fn _mm256_gf2p8affine_epi64_epi8<const B: i32>(x: __m256i, a: __m256i
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
@@ -337,7 +337,7 @@ pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8<const B: i32>(
x: __m256i,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let zero = _mm256_setzero_si256().as_i8x32();
let x = x.as_i8x32();
@@ -354,7 +354,7 @@ pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8<const B: i32>(
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
@@ -365,7 +365,7 @@ pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8<const B: i32>(
x: __m256i,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
@@ -378,13 +378,13 @@ pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8<const B: i32>(
/// and b being a constant 8-bit immediate value.
/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni")]
#[cfg_attr(test, assert_instr(gf2p8affineqb, B = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_gf2p8affine_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
@@ -400,7 +400,7 @@ pub unsafe fn _mm_gf2p8affine_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
@@ -410,7 +410,7 @@ pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8<const B: i32>(
x: __m128i,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let zero = _mm_setzero_si128().as_i8x16();
let x = x.as_i8x16();
@@ -427,7 +427,7 @@ pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8<const B: i32>(
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affine_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_gf2p8affine_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))]
@@ -438,7 +438,7 @@ pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8<const B: i32>(
x: __m128i,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
@@ -453,13 +453,13 @@ pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8<const B: i32>(
/// The inverse of 0 is 0.
/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
@@ -477,7 +477,7 @@ pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m512i, a: __m5
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
@@ -487,7 +487,7 @@ pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
x: __m512i,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let zero = _mm512_setzero_si512().as_i8x64();
let x = x.as_i8x64();
@@ -506,7 +506,7 @@ pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512f")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
@@ -517,7 +517,7 @@ pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
x: __m512i,
a: __m512i,
) -> __m512i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x64();
let a = a.as_i8x64();
@@ -532,13 +532,13 @@ pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
/// The inverse of 0 is 0.
/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
@@ -556,7 +556,7 @@ pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m256i, a: __m2
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
@@ -566,7 +566,7 @@ pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
x: __m256i,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let zero = _mm256_setzero_si256().as_i8x32();
let x = x.as_i8x32();
@@ -585,7 +585,7 @@ pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
@@ -596,7 +596,7 @@ pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
x: __m256i,
a: __m256i,
) -> __m256i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x32();
let a = a.as_i8x32();
@@ -611,13 +611,13 @@ pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
/// The inverse of 0 is 0.
/// Each pack of 8 bytes in x is paired with the 64-bit word at the same position in a.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni")]
#[cfg_attr(test, assert_instr(gf2p8affineinvqb, B = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
@@ -635,7 +635,7 @@ pub unsafe fn _mm_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m128i, a: __m128i
/// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
@@ -645,7 +645,7 @@ pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
x: __m128i,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let zero = _mm_setzero_si128().as_i8x16();
let x = x.as_i8x16();
@@ -664,7 +664,7 @@ pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8<const B: i32>(
/// Uses the writemask in k - elements are copied from src if the corresponding mask bit is not set.
/// Otherwise the computation result is written into the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affineinv_epi64_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_gf2p8affineinv_epi64_epi8)
#[inline]
#[target_feature(enable = "gfni,avx512bw,avx512vl")]
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))]
@@ -675,7 +675,7 @@ pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8<const B: i32>(
x: __m128i,
a: __m128i,
) -> __m128i {
- static_assert_imm8!(B);
+ static_assert_uimm_bits!(B, 8);
let b = B as u8;
let x = x.as_i8x16();
let a = a.as_i8x16();
@@ -698,7 +698,7 @@ mod tests {
fn mulbyte(left: u8, right: u8) -> u8 {
// this implementation follows the description in
- // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8
+ // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_gf2p8mul_epi8
const REDUCTION_POLYNOMIAL: u16 = 0x11b;
let left: u16 = left.into();
let right: u16 = right.into();
@@ -742,7 +742,7 @@ mod tests {
fn mat_vec_multiply_affine(matrix: u64, x: u8, b: u8) -> u8 {
// this implementation follows the description in
- // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi64_epi8
+ // https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_gf2p8affine_epi64_epi8
let mut accumulator = 0;
for bit in 0..8 {
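
For reference, a plain-Rust sketch of the per-byte GF(2^8) multiplication the gf2p8mul intrinsics perform, following the carry-less-multiply-then-reduce description in the linked guide; the helper name is ours, and the test's own `mulbyte` above implements the same idea.

fn gf2p8_mul(left: u8, right: u8) -> u8 {
    // x^8 + x^4 + x^3 + x + 1
    const REDUCTION_POLYNOMIAL: u16 = 0x11b;
    let left = left as u16;
    let right = right as u16;
    let mut product: u16 = 0;
    // Carry-less multiply: XOR in a shifted copy of `left` for each set
    // bit of `right`; the result can occupy up to 15 bits.
    for i in 0..8 {
        if (right >> i) & 1 == 1 {
            product ^= left << i;
        }
    }
    // Reduce modulo the polynomial, clearing bits 15..8 from the top down.
    for i in (8..16).rev() {
        if (product >> i) & 1 == 1 {
            product ^= REDUCTION_POLYNOMIAL << (i - 8);
        }
    }
    product as u8
}

fn main() {
    // Doubling 0x80 overflows bit 8 and gets reduced: 0x100 ^ 0x11b = 0x1b.
    assert_eq!(gf2p8_mul(0x02, 0x80), 0x1b);
    // 0x53 and 0xca are multiplicative inverses in this field (FIPS-197).
    assert_eq!(gf2p8_mul(0x53, 0xca), 0x01);
}
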
diff --git a/library/stdarch/crates/core_arch/src/x86/macros.rs b/library/stdarch/crates/core_arch/src/x86/macros.rs
index e686e65b3..17d64f5bb 100644
--- a/library/stdarch/crates/core_arch/src/x86/macros.rs
+++ b/library/stdarch/crates/core_arch/src/x86/macros.rs
@@ -1,89 +1,44 @@
//! Utility macros.
-//!
-// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
-// not a round number.
-pub(crate) struct ValidateConstRound<const IMM: i32>;
-impl<const IMM: i32> ValidateConstRound<IMM> {
- pub(crate) const VALID: () = {
- assert!(
- IMM == 4 || IMM == 8 || IMM == 9 || IMM == 10 || IMM == 11,
- "Invalid IMM value"
- );
- };
-}
+// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is
+// not a round number.
#[allow(unused)]
macro_rules! static_assert_rounding {
($imm:ident) => {
- let _ = $crate::core_arch::x86::macros::ValidateConstRound::<$imm>::VALID;
+ static_assert!(
+ $imm == 4 || $imm == 8 || $imm == 9 || $imm == 10 || $imm == 11,
+ "Invalid IMM value"
+ )
};
}
-// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
+// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is
// not a sae number.
-pub(crate) struct ValidateConstSae<const IMM: i32>;
-impl<const IMM: i32> ValidateConstSae<IMM> {
- pub(crate) const VALID: () = {
- assert!(IMM == 4 || IMM == 8, "Invalid IMM value");
- };
-}
-
#[allow(unused)]
macro_rules! static_assert_sae {
($imm:ident) => {
- let _ = $crate::core_arch::x86::macros::ValidateConstSae::<$imm>::VALID;
+ static_assert!($imm == 4 || $imm == 8, "Invalid IMM value")
};
}
-// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
+// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is
// not a mantissas sae number.
-pub(crate) struct ValidateConstMantissasSae<const IMM: i32>;
-impl<const IMM: i32> ValidateConstMantissasSae<IMM> {
- pub(crate) const VALID: () = {
- assert!(IMM == 4 || IMM == 8 || IMM == 12, "Invalid IMM value");
- };
-}
-
#[allow(unused)]
macro_rules! static_assert_mantissas_sae {
($imm:ident) => {
- let _ = $crate::core_arch::x86::macros::ValidateConstMantissasSae::<$imm>::VALID;
+ static_assert!($imm == 4 || $imm == 8 || $imm == 12, "Invalid IMM value")
};
}
-// Helper struct used to trigger const eval errors when the unsigned const generic immediate value
-// `IMM` is out of `[MIN-MAX]` range.
-pub(crate) struct ValidateConstImmU32<const IMM: u32, const MIN: u32, const MAX: u32>;
-impl<const IMM: u32, const MIN: u32, const MAX: u32> ValidateConstImmU32<IMM, MIN, MAX> {
- pub(crate) const VALID: () = {
- assert!(IMM >= MIN && IMM <= MAX, "IMM value not in expected range");
- };
-}
-
-#[allow(unused_macros)]
-macro_rules! static_assert_imm_u8 {
- ($imm:ident) => {
- let _ =
- $crate::core_arch::x86::macros::ValidateConstImmU32::<$imm, 0, { (1 << 8) - 1 }>::VALID;
- };
-}
-
-// Helper struct used to trigger const eval errors when the const generic immediate value `SCALE` is
+// Helper macro used to trigger const eval errors when the const generic immediate value `SCALE` is
// not valid for gather instructions: the only valid scale values are 1, 2, 4 and 8.
-pub(crate) struct ValidateConstGatherScale<const SCALE: i32>;
-impl<const SCALE: i32> ValidateConstGatherScale<SCALE> {
- pub(crate) const VALID: () = {
- assert!(
- SCALE == 1 || SCALE == 2 || SCALE == 4 || SCALE == 8,
- "Invalid SCALE value"
- );
- };
-}
-
#[allow(unused)]
macro_rules! static_assert_imm8_scale {
($imm:ident) => {
- let _ = $crate::core_arch::x86::macros::ValidateConstGatherScale::<$imm>::VALID;
+ static_assert!(
+ $imm == 1 || $imm == 2 || $imm == 4 || $imm == 8,
+ "Invalid SCALE value"
+ )
};
}
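
The struct-based validators existed because an ordinary `const` item inside a function cannot refer to that function's const generics, while an associated const on a generic struct can; an inline-const `static_assert!` removes the indirection. A side-by-side sketch, assuming `static_assert!` expands to an inline const block, which is the typical implementation:

// Old pattern: an associated const forces per-IMM const evaluation.
struct ValidateRound<const IMM: i32>;
impl<const IMM: i32> ValidateRound<IMM> {
    const VALID: () = {
        assert!(
            IMM == 4 || IMM == 8 || IMM == 9 || IMM == 10 || IMM == 11,
            "Invalid IMM value"
        );
    };
}

fn old_style<const IMM: i32>() {
    // Referencing the associated const triggers the assertion.
    let _ = ValidateRound::<IMM>::VALID;
}

// New pattern: an inline const can see `IMM` directly.
fn new_style<const IMM: i32>() {
    const {
        assert!(
            IMM == 4 || IMM == 8 || IMM == 9 || IMM == 10 || IMM == 11,
            "Invalid IMM value"
        );
    };
}

fn main() {
    old_style::<9>();
    new_style::<10>();
    // old_style::<5>() or new_style::<5>() would fail to compile.
}
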
diff --git a/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs
index a2ebdf9c8..6a5cd73f9 100644
--- a/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs
+++ b/library/stdarch/crates/core_arch/src/x86/pclmulqdq.rs
@@ -22,7 +22,7 @@ extern "C" {
/// The immediate byte is used for determining which halves of `a` and `b`
/// should be used. Immediate bits other than 0 and 4 are ignored.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_clmulepi64_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clmulepi64_si128)
#[inline]
#[target_feature(enable = "pclmulqdq")]
#[cfg_attr(all(test, not(target_os = "linux")), assert_instr(pclmulqdq, IMM8 = 0))]
@@ -33,7 +33,7 @@ extern "C" {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_clmulepi64_si128<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pclmulqdq(a, b, IMM8 as u8)
}
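
A small usage sketch of the carry-less multiply, not part of this diff: with `IMM8 = 0x00` the low 64-bit halves of `a` and `b` are multiplied (bit 0 of the immediate selects the half of `a`, bit 4 the half of `b`).

#[cfg(target_arch = "x86_64")]
fn clmul_demo() {
    if std::is_x86_feature_detected!("pclmulqdq") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_set_epi64x(0, 0b101); // low half: x^2 + 1
            let b = _mm_set_epi64x(0, 0b011); // low half: x + 1
            let r = _mm_clmulepi64_si128::<0x00>(a, b);
            // Carry-less: (x^2 + 1)(x + 1) = x^3 + x^2 + x + 1 = 0b1111.
            let mut out = [0u64; 2];
            _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, r);
            assert_eq!(out[0], 0b1111);
        }
    }
}
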
diff --git a/library/stdarch/crates/core_arch/src/x86/rdrand.rs b/library/stdarch/crates/core_arch/src/x86/rdrand.rs
index c6bab9148..cfb7fc796 100644
--- a/library/stdarch/crates/core_arch/src/x86/rdrand.rs
+++ b/library/stdarch/crates/core_arch/src/x86/rdrand.rs
@@ -21,7 +21,7 @@ use stdarch_test::assert_instr;
/// Reads a hardware-generated 16-bit random value and stores the result in `val`.
/// Returns 1 if a random value was generated, and 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdrand16_step)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdrand16_step)
#[inline]
#[target_feature(enable = "rdrand")]
#[cfg_attr(test, assert_instr(rdrand))]
@@ -35,7 +35,7 @@ pub unsafe fn _rdrand16_step(val: &mut u16) -> i32 {
/// Reads a hardware-generated 32-bit random value and stores the result in `val`.
/// Returns 1 if a random value was generated, and 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdrand32_step)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdrand32_step)
#[inline]
#[target_feature(enable = "rdrand")]
#[cfg_attr(test, assert_instr(rdrand))]
@@ -49,7 +49,7 @@ pub unsafe fn _rdrand32_step(val: &mut u32) -> i32 {
/// Reads a 16-bit NIST SP800-90B and SP800-90C compliant random value and stores
/// it in `val`. Returns 1 if a random value was generated, and 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdseed16_step)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdseed16_step)
#[inline]
#[target_feature(enable = "rdseed")]
#[cfg_attr(test, assert_instr(rdseed))]
@@ -63,7 +63,7 @@ pub unsafe fn _rdseed16_step(val: &mut u16) -> i32 {
/// Reads a 32-bit NIST SP800-90B and SP800-90C compliant random value and stores
/// it in `val`. Returns 1 if a random value was generated, and 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdseed32_step)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdseed32_step)
#[inline]
#[target_feature(enable = "rdseed")]
#[cfg_attr(test, assert_instr(rdseed))]
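
Because both families report success through the return value rather than panicking, callers typically retry. A usage sketch, not part of this diff; the bounded retry count follows Intel's DRNG software guidance and is our choice here:

#[cfg(target_arch = "x86_64")]
fn random_u32() -> Option<u32> {
    if !std::is_x86_feature_detected!("rdrand") {
        return None;
    }
    use std::arch::x86_64::_rdrand32_step;
    let mut val = 0u32;
    // RDRAND can transiently fail (returns 0), so retry a few times.
    for _ in 0..10 {
        if unsafe { _rdrand32_step(&mut val) } == 1 {
            return Some(val);
        }
    }
    None
}
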
diff --git a/library/stdarch/crates/core_arch/src/x86/rdtsc.rs b/library/stdarch/crates/core_arch/src/x86/rdtsc.rs
index 67f6e48fa..36422f2fc 100644
--- a/library/stdarch/crates/core_arch/src/x86/rdtsc.rs
+++ b/library/stdarch/crates/core_arch/src/x86/rdtsc.rs
@@ -18,7 +18,7 @@ use stdarch_test::assert_instr;
/// On processors that support the Intel 64 architecture, the
/// high-order 32 bits of each of RAX and RDX are cleared.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdtsc)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdtsc)
#[inline]
#[cfg_attr(test, assert_instr(rdtsc))]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -41,7 +41,7 @@ pub unsafe fn _rdtsc() -> u64 {
/// On processors that support the Intel 64 architecture, the
/// high-order 32 bits of each of RAX, RDX, and RCX are cleared.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=__rdtscp)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=__rdtscp)
#[inline]
#[cfg_attr(test, assert_instr(rdtscp))]
#[stable(feature = "simd_x86", since = "1.27.0")]
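A minimal timing sketch around `_rdtsc`, not part of this diff. RDTSC is not a serializing instruction, so serious measurements add fencing or use `__rdtscp`; this only shows the call shape.

#[cfg(target_arch = "x86_64")]
fn cycles<F: FnOnce()>(f: F) -> u64 {
    use std::arch::x86_64::_rdtsc;
    let start = unsafe { _rdtsc() };
    f();
    let end = unsafe { _rdtsc() };
    // Wrapping subtraction guards against counter wrap-around.
    end.wrapping_sub(start)
}
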
diff --git a/library/stdarch/crates/core_arch/src/x86/rtm.rs b/library/stdarch/crates/core_arch/src/x86/rtm.rs
index dab73cde9..ea1e80057 100644
--- a/library/stdarch/crates/core_arch/src/x86/rtm.rs
+++ b/library/stdarch/crates/core_arch/src/x86/rtm.rs
@@ -79,7 +79,7 @@ pub unsafe fn _xend() {
#[cfg_attr(test, assert_instr(xabort, IMM8 = 0x0))]
#[rustc_legacy_const_generics(0)]
pub unsafe fn _xabort<const IMM8: u32>() {
- static_assert_imm_u8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
x86_xabort(IMM8 as i8)
}
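
The `_xabort` immediate becomes the abort code that the fallback path can inspect. A sketch of the surrounding RTM pattern, assuming the crate's unstable `_xbegin`/`_xend`/`_XBEGIN_STARTED` API (nightly-only at the time of this diff):

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "rtm")]
unsafe fn transactional_or_abort(should_abort: bool) -> bool {
    use core::arch::x86_64::{_xabort, _xbegin, _xend, _XBEGIN_STARTED};
    let status = _xbegin();
    if status == _XBEGIN_STARTED {
        if should_abort {
            // 8-bit abort code, now checked via static_assert_uimm_bits!;
            // execution resumes at _xbegin with a non-STARTED status.
            _xabort::<0x42>();
        }
        _xend();
        true
    } else {
        // Aborted or unsupported; `status` carries the abort information.
        false
    }
}
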
diff --git a/library/stdarch/crates/core_arch/src/x86/sha.rs b/library/stdarch/crates/core_arch/src/x86/sha.rs
index cfb330cfb..5c5e81ba9 100644
--- a/library/stdarch/crates/core_arch/src/x86/sha.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sha.rs
@@ -28,7 +28,7 @@ use stdarch_test::assert_instr;
/// (unsigned 32-bit integers) using previous message values from `a` and `b`,
/// and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha1msg1_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1msg1_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1msg1))]
@@ -41,7 +41,7 @@ pub unsafe fn _mm_sha1msg1_epu32(a: __m128i, b: __m128i) -> __m128i {
/// (unsigned 32-bit integers) using the intermediate result in `a` and the
/// previous message values in `b`, and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha1msg2_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1msg2_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1msg2))]
@@ -54,7 +54,7 @@ pub unsafe fn _mm_sha1msg2_epu32(a: __m128i, b: __m128i) -> __m128i {
/// current SHA1 state variable `a`, adds that value to the scheduled values
/// (unsigned 32-bit integers) in `b`, and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha1nexte_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1nexte_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1nexte))]
@@ -69,14 +69,14 @@ pub unsafe fn _mm_sha1nexte_epu32(a: __m128i, b: __m128i) -> __m128i {
/// updated SHA1 state (A,B,C,D). `FUNC` contains the logic functions and round
/// constants.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha1rnds4_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1rnds4_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1rnds4, FUNC = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha1rnds4_epu32<const FUNC: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm2!(FUNC);
+ static_assert_uimm_bits!(FUNC, 2);
transmute(sha1rnds4(a.as_i32x4(), b.as_i32x4(), FUNC as i8))
}
@@ -84,7 +84,7 @@ pub unsafe fn _mm_sha1rnds4_epu32<const FUNC: i32>(a: __m128i, b: __m128i) -> __
/// (unsigned 32-bit integers) using previous message values from `a` and `b`,
/// and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha256msg1_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha256msg1_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha256msg1))]
@@ -97,7 +97,7 @@ pub unsafe fn _mm_sha256msg1_epu32(a: __m128i, b: __m128i) -> __m128i {
/// (unsigned 32-bit integers) using previous message values from `a` and `b`,
/// and returns the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha256msg2_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha256msg2_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha256msg2))]
@@ -112,7 +112,7 @@ pub unsafe fn _mm_sha256msg2_epu32(a: __m128i, b: __m128i) -> __m128i {
/// integers) and the corresponding round constants from `k`, and stores the
/// updated SHA256 state (A,B,E,F) in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sha256rnds2_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha256rnds2_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha256rnds2))]
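
A sketch showing the 2-bit `FUNC` immediate on `_mm_sha1rnds4_epu32`: values 0 through 3 select the SHA-1 logic function for round groups 1-20, 21-40, 41-60, and 61-80 respectively (per the SHA1RNDS4 description; the wrapper function here is ours).

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{__m128i, _mm_sha1rnds4_epu32};

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sha")]
unsafe fn sha1_first_round_group(abcd: __m128i, e_and_msg: __m128i) -> __m128i {
    // FUNC = 0 selects the logic function for rounds 1-20; anything that
    // does not fit in 2 bits now fails to compile via the new assert.
    _mm_sha1rnds4_epu32::<0>(abcd, e_and_msg)
}
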
diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs
index f21288970..7e4b352df 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse.rs
@@ -11,7 +11,7 @@ use stdarch_test::assert_instr;
/// Adds the first component of `a` and `b`; the other components are copied
/// from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(addss))]
@@ -22,7 +22,7 @@ pub unsafe fn _mm_add_ss(a: __m128, b: __m128) -> __m128 {
/// Adds __m128 vectors.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(addps))]
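
A usage sketch contrasting the scalar (`_ss`) and packed (`_ps`) forms covered in this file, not part of the diff: `_mm_add_ss` changes only lane 0 and copies the remaining lanes from `a`.

#[cfg(target_arch = "x86_64")]
fn sse_demo() {
    if std::is_x86_feature_detected!("sse") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_set_ps(4.0, 3.0, 2.0, 1.0); // lanes low-to-high: 1,2,3,4
            let b = _mm_set_ps(8.0, 6.0, 4.0, 2.0); // lanes low-to-high: 2,4,6,8
            let sum = _mm_add_ps(a, b); // every lane: a + b
            let lo = _mm_add_ss(a, b); // only lane 0: a + b; rest from a
            let mut out = [0.0f32; 4];
            _mm_storeu_ps(out.as_mut_ptr(), sum);
            assert_eq!(out, [3.0, 6.0, 9.0, 12.0]);
            _mm_storeu_ps(out.as_mut_ptr(), lo);
            assert_eq!(out, [3.0, 2.0, 3.0, 4.0]);
        }
    }
}
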
@@ -34,7 +34,7 @@ pub unsafe fn _mm_add_ps(a: __m128, b: __m128) -> __m128 {
/// Subtracts the first component of `b` from `a`; the other components are
/// copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(subss))]
@@ -45,7 +45,7 @@ pub unsafe fn _mm_sub_ss(a: __m128, b: __m128) -> __m128 {
/// Subtracts __m128 vectors.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(subps))]
@@ -57,7 +57,7 @@ pub unsafe fn _mm_sub_ps(a: __m128, b: __m128) -> __m128 {
/// Multiplies the first component of `a` and `b`; the other components are
/// copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(mulss))]
@@ -68,7 +68,7 @@ pub unsafe fn _mm_mul_ss(a: __m128, b: __m128) -> __m128 {
/// Multiplies __m128 vectors.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(mulps))]
@@ -80,7 +80,7 @@ pub unsafe fn _mm_mul_ps(a: __m128, b: __m128) -> __m128 {
/// Divides the first component of `a` by that of `b`; the other components are
/// copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(divss))]
@@ -91,7 +91,7 @@ pub unsafe fn _mm_div_ss(a: __m128, b: __m128) -> __m128 {
/// Divides __m128 vectors.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(divps))]
@@ -103,7 +103,7 @@ pub unsafe fn _mm_div_ps(a: __m128, b: __m128) -> __m128 {
/// Returns the square root of the first single-precision (32-bit)
/// floating-point element in `a`; the other elements are unchanged.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(sqrtss))]
@@ -115,7 +115,7 @@ pub unsafe fn _mm_sqrt_ss(a: __m128) -> __m128 {
/// Returns the square root of packed single-precision (32-bit) floating-point
/// elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(sqrtps))]
@@ -127,7 +127,7 @@ pub unsafe fn _mm_sqrt_ps(a: __m128) -> __m128 {
/// Returns the approximate reciprocal of the first single-precision
/// (32-bit) floating-point element in `a`; the other elements are unchanged.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(rcpss))]
@@ -139,7 +139,7 @@ pub unsafe fn _mm_rcp_ss(a: __m128) -> __m128 {
/// Returns the approximate reciprocal of packed single-precision (32-bit)
/// floating-point elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(rcpps))]
@@ -151,7 +151,7 @@ pub unsafe fn _mm_rcp_ps(a: __m128) -> __m128 {
/// Returns the approximate reciprocal square root of the first single-precision
/// (32-bit) floating-point element in `a`; the other elements are unchanged.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(rsqrtss))]
@@ -163,7 +163,7 @@ pub unsafe fn _mm_rsqrt_ss(a: __m128) -> __m128 {
/// Returns the approximate reciprocal square root of packed single-precision
/// (32-bit) floating-point elements in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(rsqrtps))]
@@ -176,7 +176,7 @@ pub unsafe fn _mm_rsqrt_ps(a: __m128) -> __m128 {
/// and `b`, and returns the minimum value in the first element of the return
/// value; the other elements are copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(minss))]
@@ -188,7 +188,7 @@ pub unsafe fn _mm_min_ss(a: __m128, b: __m128) -> __m128 {
/// Compares packed single-precision (32-bit) floating-point elements in `a` and
/// `b`, and returns the corresponding minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(minps))]
@@ -202,7 +202,7 @@ pub unsafe fn _mm_min_ps(a: __m128, b: __m128) -> __m128 {
/// and `b`, and returns the maximum value in the first element of the return
/// value; the other elements are copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(maxss))]
@@ -214,7 +214,7 @@ pub unsafe fn _mm_max_ss(a: __m128, b: __m128) -> __m128 {
/// Compares packed single-precision (32-bit) floating-point elements in `a` and
/// `b`, and returns the corresponding maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(maxps))]
@@ -226,7 +226,7 @@ pub unsafe fn _mm_max_ps(a: __m128, b: __m128) -> __m128 {
/// Bitwise AND of packed single-precision (32-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_ps)
#[inline]
#[target_feature(enable = "sse")]
// i586 only seems to generate plain `and` instructions, so ignore it.
@@ -246,7 +246,7 @@ pub unsafe fn _mm_and_ps(a: __m128, b: __m128) -> __m128 {
///
/// Computes `!a & b` for each bit in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_ps)
#[inline]
#[target_feature(enable = "sse")]
// i586 only seems to generate plain `not` and `and` instructions, so ignore
@@ -265,7 +265,7 @@ pub unsafe fn _mm_andnot_ps(a: __m128, b: __m128) -> __m128 {
/// Bitwise OR of packed single-precision (32-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_or_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_ps)
#[inline]
#[target_feature(enable = "sse")]
// i586 only seems to generate plain `or` instructions, so we ignore it.
@@ -283,7 +283,7 @@ pub unsafe fn _mm_or_ps(a: __m128, b: __m128) -> __m128 {
/// Bitwise exclusive OR of packed single-precision (32-bit) floating-point
/// elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_ps)
#[inline]
#[target_feature(enable = "sse")]
// i586 only seems to generate plain `xor` instructions, so we ignore it.
@@ -302,7 +302,7 @@ pub unsafe fn _mm_xor_ps(a: __m128, b: __m128) -> __m128 {
/// the result will be `0xffffffff` if the two inputs are equal, or `0`
/// otherwise. The upper 96 bits of the result are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpeqss))]
@@ -316,7 +316,7 @@ pub unsafe fn _mm_cmpeq_ss(a: __m128, b: __m128) -> __m128 {
/// `b.extract(0)`, or `0` otherwise. The upper 96 bits of the result are the
/// upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpltss))]
@@ -330,7 +330,7 @@ pub unsafe fn _mm_cmplt_ss(a: __m128, b: __m128) -> __m128 {
/// or equal `b.extract(0)`, or `0` otherwise. The upper 96 bits of the result
/// are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpless))]
@@ -344,13 +344,13 @@ pub unsafe fn _mm_cmple_ss(a: __m128, b: __m128) -> __m128 {
/// than `b.extract(0)`, or `0` otherwise. The upper 96 bits of the result
/// are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpltss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpgt_ss(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, cmpss(b, a, 1), [4, 1, 2, 3])
+ simd_shuffle!(a, cmpss(b, a, 1), [4, 1, 2, 3])
}
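The `[4, 1, 2, 3]` index vector above is the splice step: in `simd_shuffle!`, indices 0..=3 select lanes of the first operand and 4..=7 lanes of the second, so lane 0 comes from the compare result while lanes 1..=3 stay `a`'s. Note the operand swap as well: greater-than is synthesized as less-than with `a` and `b` exchanged. A minimal usage sketch (not part of the patch; assumes an x86_64 target, where SSE is baseline):

```rust
use std::arch::x86_64::*;

fn main() {
    unsafe {
        let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); // lane 0 = 1.0
        let b = _mm_set_ss(0.5);
        let r = _mm_cmpgt_ss(a, b);
        let mut out = [0.0f32; 4];
        _mm_storeu_ps(out.as_mut_ptr(), r);
        assert_eq!(out[0].to_bits(), 0xffff_ffff); // 1.0 > 0.5: all-ones mask
        assert_eq!(&out[1..], &[2.0, 3.0, 4.0]);   // upper lanes copied from `a`
    }
}
```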
/// Compares the lowest `f32` of both inputs for greater than or equal. The
@@ -358,13 +358,13 @@ pub unsafe fn _mm_cmpgt_ss(a: __m128, b: __m128) -> __m128 {
/// greater than or equal `b.extract(0)`, or `0` otherwise. The upper 96 bits
/// of the result are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpless))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpge_ss(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, cmpss(b, a, 2), [4, 1, 2, 3])
+ simd_shuffle!(a, cmpss(b, a, 2), [4, 1, 2, 3])
}
/// Compares the lowest `f32` of both inputs for inequality. The lowest 32 bits
@@ -372,7 +372,7 @@ pub unsafe fn _mm_cmpge_ss(a: __m128, b: __m128) -> __m128 {
/// `b.extract(0)`, or `0` otherwise. The upper 96 bits of the result are the
/// upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpneqss))]
@@ -386,7 +386,7 @@ pub unsafe fn _mm_cmpneq_ss(a: __m128, b: __m128) -> __m128 {
/// `b.extract(0)`, or `0` otherwise. The upper 96 bits of the result are the
/// upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnlt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnltss))]
@@ -400,7 +400,7 @@ pub unsafe fn _mm_cmpnlt_ss(a: __m128, b: __m128) -> __m128 {
/// less than or equal to `b.extract(0)`, or `0` otherwise. The upper 96 bits
/// of the result are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnle_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnless))]
@@ -414,13 +414,13 @@ pub unsafe fn _mm_cmpnle_ss(a: __m128, b: __m128) -> __m128 {
/// than `b.extract(0)`, or `0` otherwise. The upper 96 bits of the result are
/// the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpngt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnltss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpngt_ss(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, cmpss(b, a, 5), [4, 1, 2, 3])
+ simd_shuffle!(a, cmpss(b, a, 5), [4, 1, 2, 3])
}
/// Compares the lowest `f32` of both inputs for not-greater-than-or-equal. The
@@ -428,13 +428,13 @@ pub unsafe fn _mm_cmpngt_ss(a: __m128, b: __m128) -> __m128 {
/// greater than or equal to `b.extract(0)`, or `0` otherwise. The upper 96
/// bits of the result are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnge_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnless))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpnge_ss(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, cmpss(b, a, 6), [4, 1, 2, 3])
+ simd_shuffle!(a, cmpss(b, a, 6), [4, 1, 2, 3])
}
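The third argument to `cmpss` in these hunks is the SSE compare predicate. Reading the `1`, `2`, `5`, `6` immediates is easier with a scalar model of the encoding (an assumption based on the standard CMPSS imm8 values; the patch itself does not spell them out):

```rust
// Scalar model of the CMPSS predicate byte; the `imm` values match the
// literals passed to `cmpss` in the hunks above.
fn cmpss_model(a: f32, b: f32, imm: u8) -> bool {
    match imm {
        0 => a == b,                     // cmpeq
        1 => a < b,                      // cmplt (swapped args: cmpgt)
        2 => a <= b,                     // cmple (swapped args: cmpge)
        3 => a.is_nan() || b.is_nan(),   // cmpunord
        4 => a != b,                     // cmpneq (NaN compares not-equal)
        5 => !(a < b),                   // cmpnlt (swapped args: cmpngt)
        6 => !(a <= b),                  // cmpnle (swapped args: cmpnge)
        7 => !a.is_nan() && !b.is_nan(), // cmpord
        _ => unreachable!("the predicate is 3 bits wide on plain SSE"),
    }
}

fn main() {
    // `_mm_cmpngt_ss(a, b)` is written above as `cmpss(b, a, 5)`; with
    // a = 1.0 and b = 2.0 the low lane is true (1.0 is not greater than 2.0).
    assert!(cmpss_model(2.0, 1.0, 5));
}
```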
/// Checks if the lowest `f32` values of both inputs are ordered. The lowest 32 bits of
@@ -442,7 +442,7 @@ pub unsafe fn _mm_cmpnge_ss(a: __m128, b: __m128) -> __m128 {
/// `b.extract(0)` is a NaN, or `0` otherwise. The upper 96 bits of the result
/// are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpord_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpordss))]
@@ -456,7 +456,7 @@ pub unsafe fn _mm_cmpord_ss(a: __m128, b: __m128) -> __m128 {
/// `b.extract(0)` is a NaN, or `0` otherwise. The upper 96 bits of the result
/// are the upper 96 bits of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpunord_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpunordss))]
@@ -469,7 +469,7 @@ pub unsafe fn _mm_cmpunord_ss(a: __m128, b: __m128) -> __m128 {
/// The result in the output vector will be `0xffffffff` if the input elements
/// were equal, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpeqps))]
@@ -482,7 +482,7 @@ pub unsafe fn _mm_cmpeq_ps(a: __m128, b: __m128) -> __m128 {
/// The result in the output vector will be `0xffffffff` if the input element
/// in `a` is less than the corresponding element in `b`, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpltps))]
@@ -496,7 +496,7 @@ pub unsafe fn _mm_cmplt_ps(a: __m128, b: __m128) -> __m128 {
/// in `a` is less than or equal to the corresponding element in `b`, or `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpleps))]
@@ -509,7 +509,7 @@ pub unsafe fn _mm_cmple_ps(a: __m128, b: __m128) -> __m128 {
/// The result in the output vector will be `0xffffffff` if the input element
/// in `a` is greater than the corresponding element in `b`, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpltps))]
@@ -523,7 +523,7 @@ pub unsafe fn _mm_cmpgt_ps(a: __m128, b: __m128) -> __m128 {
/// in `a` is greater than or equal to the corresponding element in `b`, or `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpleps))]
@@ -536,7 +536,7 @@ pub unsafe fn _mm_cmpge_ps(a: __m128, b: __m128) -> __m128 {
/// The result in the output vector will be `0xffffffff` if the input elements
/// are **not** equal, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpneqps))]
@@ -550,7 +550,7 @@ pub unsafe fn _mm_cmpneq_ps(a: __m128, b: __m128) -> __m128 {
/// in `a` is **not** less than the corresponding element in `b`, or `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnlt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnltps))]
@@ -564,7 +564,7 @@ pub unsafe fn _mm_cmpnlt_ps(a: __m128, b: __m128) -> __m128 {
/// in `a` is **not** less than or equal to the corresponding element in `b`, or
/// `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnle_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnleps))]
@@ -578,7 +578,7 @@ pub unsafe fn _mm_cmpnle_ps(a: __m128, b: __m128) -> __m128 {
/// in `a` is **not** greater than the corresponding element in `b`, or `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpngt_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnltps))]
@@ -592,7 +592,7 @@ pub unsafe fn _mm_cmpngt_ps(a: __m128, b: __m128) -> __m128 {
/// in `a` is **not** greater than or equal to the corresponding element in `b`,
/// or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnge_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpnleps))]
@@ -606,7 +606,7 @@ pub unsafe fn _mm_cmpnge_ps(a: __m128, b: __m128) -> __m128 {
/// in the output vector will be `0xffffffff` if the input elements in `a` and
/// `b` are ordered (i.e., neither of them is a NaN), or 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpord_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpordps))]
@@ -620,7 +620,7 @@ pub unsafe fn _mm_cmpord_ps(a: __m128, b: __m128) -> __m128 {
/// in the output vector will be `0xffffffff` if the input elements in `a` and
/// `b` are unordered (i.e., at least one of them is a NaN), or 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpunord_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cmpunordps))]
@@ -632,7 +632,7 @@ pub unsafe fn _mm_cmpunord_ps(a: __m128, b: __m128) -> __m128 {
/// Compares two 32-bit floats from the low-order bits of `a` and `b`. Returns
/// `1` if they are equal, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comieq_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(comiss))]
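Unlike the `cmp*_ss` family above, which produces lane masks, the `comi*` intrinsics collapse the comparison into a plain `i32`. A usage sketch (not part of the patch; assumes x86_64):

```rust
use std::arch::x86_64::*;

fn main() {
    unsafe {
        let a = _mm_set_ss(1.5);
        let b = _mm_set_ss(1.5);
        assert_eq!(_mm_comieq_ss(a, b), 1); // equal: returns 1
        assert_eq!(_mm_comilt_ss(a, b), 0); // not less-than: returns 0
    }
}
```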
@@ -644,7 +644,7 @@ pub unsafe fn _mm_comieq_ss(a: __m128, b: __m128) -> i32 {
/// Compares two 32-bit floats from the low-order bits of `a` and `b`. Returns
/// `1` if the value from `a` is less than the one from `b`, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comilt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(comiss))]
@@ -657,7 +657,7 @@ pub unsafe fn _mm_comilt_ss(a: __m128, b: __m128) -> i32 {
/// `1` if the value from `a` is less than or equal to the one from `b`, or `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comile_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(comiss))]
@@ -670,7 +670,7 @@ pub unsafe fn _mm_comile_ss(a: __m128, b: __m128) -> i32 {
/// `1` if the value from `a` is greater than the one from `b`, or `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comigt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(comiss))]
@@ -683,7 +683,7 @@ pub unsafe fn _mm_comigt_ss(a: __m128, b: __m128) -> i32 {
/// `1` if the value from `a` is greater than or equal to the one from `b`, or
/// `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comige_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(comiss))]
@@ -695,7 +695,7 @@ pub unsafe fn _mm_comige_ss(a: __m128, b: __m128) -> i32 {
/// Compares two 32-bit floats from the low-order bits of `a` and `b`. Returns
/// `1` if they are **not** equal, or `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comineq_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(comiss))]
@@ -708,7 +708,7 @@ pub unsafe fn _mm_comineq_ss(a: __m128, b: __m128) -> i32 {
/// `1` if they are equal, or `0` otherwise. This instruction will not signal
/// an exception if either argument is a quiet NaN.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomieq_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomieq_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ucomiss))]
@@ -722,7 +722,7 @@ pub unsafe fn _mm_ucomieq_ss(a: __m128, b: __m128) -> i32 {
/// This instruction will not signal an exception if either argument is a quiet
/// NaN.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomilt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomilt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ucomiss))]
@@ -736,7 +736,7 @@ pub unsafe fn _mm_ucomilt_ss(a: __m128, b: __m128) -> i32 {
/// otherwise. This instruction will not signal an exception if either argument
/// is a quiet NaN.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomile_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomile_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ucomiss))]
@@ -750,7 +750,7 @@ pub unsafe fn _mm_ucomile_ss(a: __m128, b: __m128) -> i32 {
/// otherwise. This instruction will not signal an exception if either argument
/// is a quiet NaN.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomigt_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomigt_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ucomiss))]
@@ -764,7 +764,7 @@ pub unsafe fn _mm_ucomigt_ss(a: __m128, b: __m128) -> i32 {
/// `0` otherwise. This instruction will not signal an exception if either
/// argument is a quiet NaN.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomige_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomige_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ucomiss))]
@@ -777,7 +777,7 @@ pub unsafe fn _mm_ucomige_ss(a: __m128, b: __m128) -> i32 {
/// `1` if they are **not** equal, or `0` otherwise. This instruction will not
/// signal an exception if either argument is a quiet NaN.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomineq_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomineq_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ucomiss))]
@@ -795,7 +795,7 @@ pub unsafe fn _mm_ucomineq_ss(a: __m128, b: __m128) -> i32 {
///
/// This corresponds to the `CVTSS2SI` instruction (with 32-bit output).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si32)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvtss2si))]
@@ -806,7 +806,7 @@ pub unsafe fn _mm_cvtss_si32(a: __m128) -> i32 {
/// Alias for [`_mm_cvtss_si32`](fn._mm_cvtss_si32.html).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ss2si)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ss2si)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvtss2si))]
@@ -826,7 +826,7 @@ pub unsafe fn _mm_cvt_ss2si(a: __m128) -> i32 {
///
/// This corresponds to the `CVTTSS2SI` instruction (with 32-bit output).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si32)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvttss2si))]
@@ -837,7 +837,7 @@ pub unsafe fn _mm_cvttss_si32(a: __m128) -> i32 {
/// Alias for [`_mm_cvttss_si32`](fn._mm_cvttss_si32.html).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ss2si)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ss2si)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvttss2si))]
@@ -848,7 +848,7 @@ pub unsafe fn _mm_cvtt_ss2si(a: __m128) -> i32 {
/// Extracts the lowest 32-bit float from the input vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_f32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_f32)
#[inline]
#[target_feature(enable = "sse")]
// No point in using assert_instrs. In Unix x86_64 calling convention this is a
@@ -864,7 +864,7 @@ pub unsafe fn _mm_cvtss_f32(a: __m128) -> f32 {
/// This intrinsic corresponds to the `CVTSI2SS` instruction (with 32-bit
/// input).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvtsi2ss))]
@@ -875,7 +875,7 @@ pub unsafe fn _mm_cvtsi32_ss(a: __m128, b: i32) -> __m128 {
/// Alias for [`_mm_cvtsi32_ss`](fn._mm_cvtsi32_ss.html).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_si2ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_si2ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvtsi2ss))]
@@ -887,7 +887,7 @@ pub unsafe fn _mm_cvt_si2ss(a: __m128, b: i32) -> __m128 {
/// Construct a `__m128` with the lowest element set to `a` and the rest set to
/// zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movss))]
@@ -898,7 +898,7 @@ pub unsafe fn _mm_set_ss(a: f32) -> __m128 {
/// Construct a `__m128` with all elements set to `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(shufps))]
@@ -909,7 +909,7 @@ pub unsafe fn _mm_set1_ps(a: f32) -> __m128 {
/// Alias for [`_mm_set1_ps`](fn._mm_set1_ps.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ps1)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps1)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(shufps))]
@@ -936,7 +936,7 @@ pub unsafe fn _mm_set_ps1(a: f32) -> __m128 {
/// let v = _mm_set_ps(d, c, b, a);
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(unpcklps))]
@@ -954,7 +954,7 @@ pub unsafe fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 {
/// assert_eq!(__m128::new(a, b, c, d), _mm_setr_ps(a, b, c, d));
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(
@@ -973,7 +973,7 @@ pub unsafe fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 {
/// Construct a `__m128` with all elements initialized to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(xorps))]
@@ -997,11 +997,11 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
/// The lower half of the result takes values from `a` and the higher half from
/// `b`. The mask is split into four 2-bit control fields, each indexing an
/// element from the inputs.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_ps)
///
/// Note that there appears to be a mistake within Intel's Intrinsics Guide.
/// `_mm_shuffle_ps` is supposed to take an `i32` instead of a `u32`
-/// as is the case for [other shuffle intrinsics](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_).
+/// as is the case for [other shuffle intrinsics](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_).
/// Performing an implicit type conversion between an unsigned integer and a signed integer
/// does not cause a problem in C; however, Rust's commitment to strong typing does not allow this.
#[inline]
@@ -1010,11 +1010,11 @@ pub const fn _MM_SHUFFLE(z: u32, y: u32, x: u32, w: u32) -> i32 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm8!(MASK);
- simd_shuffle4!(
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(
a,
b,
- <const MASK: i32> [
+ [
MASK as u32 & 0b11,
(MASK as u32 >> 2) & 0b11,
((MASK as u32 >> 4) & 0b11) + 4,
@@ -1026,50 +1026,50 @@ pub unsafe fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128 {
/// Unpacks and interleaves single-precision (32-bit) floating-point elements
/// from the higher half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(unpckhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpackhi_ps(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, b, [2, 6, 3, 7])
+ simd_shuffle!(a, b, [2, 6, 3, 7])
}
/// Unpacks and interleaves single-precision (32-bit) floating-point elements
/// from the lower half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(unpcklps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpacklo_ps(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, b, [0, 4, 1, 5])
+ simd_shuffle!(a, b, [0, 4, 1, 5])
}
-/// Combine higher half of `a` and `b`. The highwe half of `b` occupies the
+/// Combines the higher halves of `a` and `b`. The higher half of `b` occupies the
/// lower half of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movehl_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehl_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movhlps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movehl_ps(a: __m128, b: __m128) -> __m128 {
// TODO: figure out why this is a different instruction on Windows.
- simd_shuffle4!(a, b, [6, 7, 2, 3])
+ simd_shuffle!(a, b, [6, 7, 2, 3])
}
/// Combines the lower halves of `a` and `b`. The lower half of `b` occupies the
/// higher half of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movelh_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movelh_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movlhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, b, [0, 1, 4, 5])
+ simd_shuffle!(a, b, [0, 1, 4, 5])
}
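All four index vectors in this hunk follow the same two-input convention. A scalar model (treating `__m128` as `[f32; 4]` with lane 0 lowest; that layout is an assumption made for illustration, not something the patch states) reproduces them:

```rust
fn shuffle4(a: [f32; 4], b: [f32; 4], idx: [usize; 4]) -> [f32; 4] {
    // Indices 0..=3 select from `a`, 4..=7 from `b`.
    idx.map(|i| if i < 4 { a[i] } else { b[i - 4] })
}

fn main() {
    let a = [0.0, 1.0, 2.0, 3.0];
    let b = [4.0, 5.0, 6.0, 7.0];
    assert_eq!(shuffle4(a, b, [2, 6, 3, 7]), [2.0, 6.0, 3.0, 7.0]); // unpackhi
    assert_eq!(shuffle4(a, b, [0, 4, 1, 5]), [0.0, 4.0, 1.0, 5.0]); // unpacklo
    assert_eq!(shuffle4(a, b, [6, 7, 2, 3]), [6.0, 7.0, 2.0, 3.0]); // movehl
    assert_eq!(shuffle4(a, b, [0, 1, 4, 5]), [0.0, 1.0, 4.0, 5.0]); // movelh
}
```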
/// Returns a mask of the most significant bit of each element in `a`.
@@ -1077,7 +1077,7 @@ pub unsafe fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 {
/// The mask is stored in the 4 least significant bits of the return value.
/// All other bits are set to `0`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movmskps))]
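A usage sketch for the sign-bit mask just described (not part of the patch; assumes x86_64):

```rust
use std::arch::x86_64::*;

fn main() {
    unsafe {
        let v = _mm_setr_ps(-1.0, 2.0, -3.0, 4.0);
        // Bits 0 and 2 are set because lanes 0 and 2 are negative.
        assert_eq!(_mm_movemask_ps(v), 0b0101);
    }
}
```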
@@ -1091,7 +1091,7 @@ pub unsafe fn _mm_movemask_ps(a: __m128) -> i32 {
///
/// This corresponds to instructions `VMOVSS` / `MOVSS`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movss))]
@@ -1106,7 +1106,7 @@ pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 {
/// This corresponds to instructions `VMOVSS` / `MOVSS` followed by some
/// shuffling.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load1_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movss))]
@@ -1118,7 +1118,7 @@ pub unsafe fn _mm_load1_ps(p: *const f32) -> __m128 {
/// Alias for [`_mm_load1_ps`](fn._mm_load1_ps.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ps1)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps1)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movss))]
@@ -1136,7 +1136,7 @@ pub unsafe fn _mm_load_ps1(p: *const f32) -> __m128 {
///
/// This corresponds to instructions `VMOVAPS` / `MOVAPS`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -1154,7 +1154,7 @@ pub unsafe fn _mm_load_ps(p: *const f32) -> __m128 {
///
/// This corresponds to instructions `VMOVUPS` / `MOVUPS`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movups))]
@@ -1191,21 +1191,21 @@ pub unsafe fn _mm_loadu_ps(p: *const f32) -> __m128 {
/// This corresponds to instructions `VMOVAPS` / `MOVAPS` followed by some
/// shuffling.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movaps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_loadr_ps(p: *const f32) -> __m128 {
let a = _mm_load_ps(p);
- simd_shuffle4!(a, a, [3, 2, 1, 0])
+ simd_shuffle!(a, a, [3, 2, 1, 0])
}
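The body spells out the relationship: `_mm_loadr_ps` is the aligned load followed by the `[3, 2, 1, 0]` reversal. A quick demonstration (hypothetical, assumes x86_64; the `Aligned` wrapper exists only to provide the required 16-byte alignment):

```rust
use std::arch::x86_64::*;

#[repr(align(16))]
struct Aligned([f32; 4]);

fn main() {
    let data = Aligned([1.0, 2.0, 3.0, 4.0]);
    unsafe {
        let r = _mm_loadr_ps(data.0.as_ptr());
        let mut out = [0.0f32; 4];
        _mm_storeu_ps(out.as_mut_ptr(), r);
        assert_eq!(out, [4.0, 3.0, 2.0, 1.0]); // element order reversed
    }
}
```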
/// Loads unaligned 64 bits of integer data from memory into a new vector.
///
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si64)
#[inline]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86_mm_loadu_si64", since = "1.46.0")]
@@ -1217,7 +1217,7 @@ pub unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i {
///
/// This intrinsic corresponds to the `MOVSS` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movss))]
@@ -1243,20 +1243,20 @@ pub unsafe fn _mm_store_ss(p: *mut f32, a: __m128) {
/// *p.add(3) = x;
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movaps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn _mm_store1_ps(p: *mut f32, a: __m128) {
- let b: __m128 = simd_shuffle4!(a, a, [0, 0, 0, 0]);
+ let b: __m128 = simd_shuffle!(a, a, [0, 0, 0, 0]);
*(p as *mut __m128) = b;
}
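The broadcast-then-aligned-store pattern above, exercised end to end (a sketch, assuming x86_64 and reusing the aligned-buffer trick):

```rust
use std::arch::x86_64::*;

#[repr(align(16))]
struct Aligned([f32; 4]);

fn main() {
    let mut buf = Aligned([0.0; 4]);
    unsafe {
        let a = _mm_setr_ps(7.0, 1.0, 2.0, 3.0);
        _mm_store1_ps(buf.0.as_mut_ptr(), a); // lane 0 written to all four slots
    }
    assert_eq!(buf.0, [7.0; 4]);
}
```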
/// Alias for [`_mm_store1_ps`](fn._mm_store1_ps.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ps1)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps1)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -1275,7 +1275,7 @@ pub unsafe fn _mm_store_ps1(p: *mut f32, a: __m128) {
///
/// This corresponds to instructions `VMOVAPS` / `MOVAPS`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -1291,7 +1291,7 @@ pub unsafe fn _mm_store_ps(p: *mut f32, a: __m128) {
///
/// This corresponds to instructions `VMOVUPS` / `MOVUPS`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movups))]
@@ -1319,14 +1319,14 @@ pub unsafe fn _mm_storeu_ps(p: *mut f32, a: __m128) {
/// *p.add(3) = a.extract(0);
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movaps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn _mm_storer_ps(p: *mut f32, a: __m128) {
- let b: __m128 = simd_shuffle4!(a, a, [3, 2, 1, 0]);
+ let b: __m128 = simd_shuffle!(a, a, [3, 2, 1, 0]);
*(p as *mut __m128) = b;
}
@@ -1338,13 +1338,13 @@ pub unsafe fn _mm_storer_ps(p: *mut f32, a: __m128) {
/// _mm_move_ss(a, b) == a.replace(0, b.extract(0))
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movss))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_move_ss(a: __m128, b: __m128) -> __m128 {
- simd_shuffle4!(a, b, [4, 1, 2, 3])
+ simd_shuffle!(a, b, [4, 1, 2, 3])
}
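Here the `[4, 1, 2, 3]` indices read as: lane 0 from `b`, lanes 1..=3 from `a`. A usage sketch (assumes x86_64; not from the patch):

```rust
use std::arch::x86_64::*;

fn main() {
    unsafe {
        let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
        let b = _mm_setr_ps(9.0, 8.0, 7.0, 6.0);
        let r = _mm_move_ss(a, b);
        let mut out = [0.0f32; 4];
        _mm_storeu_ps(out.as_mut_ptr(), r);
        assert_eq!(out, [9.0, 2.0, 3.0, 4.0]);
    }
}
```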
/// Performs a serializing operation on all store-to-memory instructions that
@@ -1354,7 +1354,7 @@ pub unsafe fn _mm_move_ss(a: __m128, b: __m128) -> __m128 {
/// globally visible before any store instruction which follows the fence in
/// program order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sfence)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sfence)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(sfence))]
@@ -1367,7 +1367,7 @@ pub unsafe fn _mm_sfence() {
///
/// For more info see [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getcsr)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getcsr)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(stmxcsr))]
@@ -1380,7 +1380,7 @@ pub unsafe fn _mm_getcsr() -> u32 {
/// Sets the MXCSR register with the 32-bit unsigned integer value.
///
-/// This register constrols how SIMD instructions handle floating point
+/// This register controls how SIMD instructions handle floating point
/// operations. Modifying this register only affects the current thread.
///
/// It contains several groups of flags:
@@ -1504,7 +1504,7 @@ pub unsafe fn _mm_getcsr() -> u32 {
/// ```
///
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setcsr)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setcsr)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(ldmxcsr))]
@@ -1586,7 +1586,7 @@ pub const _MM_FLUSH_ZERO_OFF: u32 = 0x0000;
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_GET_EXCEPTION_MASK)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_EXCEPTION_MASK)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1597,7 +1597,7 @@ pub unsafe fn _MM_GET_EXCEPTION_MASK() -> u32 {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_GET_EXCEPTION_STATE)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_EXCEPTION_STATE)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1608,7 +1608,7 @@ pub unsafe fn _MM_GET_EXCEPTION_STATE() -> u32 {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_GET_FLUSH_ZERO_MODE)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_FLUSH_ZERO_MODE)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1619,7 +1619,7 @@ pub unsafe fn _MM_GET_FLUSH_ZERO_MODE() -> u32 {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_GET_ROUNDING_MODE)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_ROUNDING_MODE)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1630,7 +1630,7 @@ pub unsafe fn _MM_GET_ROUNDING_MODE() -> u32 {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_EXCEPTION_MASK)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_EXCEPTION_MASK)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1641,7 +1641,7 @@ pub unsafe fn _MM_SET_EXCEPTION_MASK(x: u32) {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_EXCEPTION_STATE)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_EXCEPTION_STATE)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1652,7 +1652,7 @@ pub unsafe fn _MM_SET_EXCEPTION_STATE(x: u32) {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_FLUSH_ZERO_MODE)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_FLUSH_ZERO_MODE)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1665,7 +1665,7 @@ pub unsafe fn _MM_SET_FLUSH_ZERO_MODE(x: u32) {
/// See [`_mm_setcsr`](fn._mm_setcsr.html)
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_ROUNDING_MODE)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_ROUNDING_MODE)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1739,7 +1739,7 @@ pub const _MM_HINT_ET1: i32 = 6;
/// resources (e.g., request buffers).
///
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_prefetch)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_prefetch)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(prefetcht0, STRATEGY = _MM_HINT_T0))]
@@ -1756,7 +1756,7 @@ pub unsafe fn _mm_prefetch<const STRATEGY: i32>(p: *const i8) {
/// Returns a vector of type __m128 with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_ps)
#[inline]
#[target_feature(enable = "sse")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -1766,7 +1766,7 @@ pub unsafe fn _mm_undefined_ps() -> __m128 {
/// Transpose the 4x4 matrix formed by 4 rows of __m128 in place.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_TRANSPOSE4_PS)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_TRANSPOSE4_PS)
#[inline]
#[allow(non_snake_case)]
#[target_feature(enable = "sse")]
@@ -1869,7 +1869,7 @@ extern "C" {
/// `mem_addr` must be aligned on a 16-byte boundary or a general-protection
/// exception _may_ be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_ps)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(movntps))]
diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs
index cde4bc316..e118ac05f 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs
@@ -15,7 +15,7 @@ use crate::{
/// This can help improve the performance and power consumption of spin-wait
/// loops.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_pause)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_pause)
#[inline]
#[cfg_attr(all(test, target_feature = "sse2"), assert_instr(pause))]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -28,7 +28,7 @@ pub unsafe fn _mm_pause() {
/// Invalidates and flushes the cache line that contains `p` from all levels of
/// the cache hierarchy.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_clflush)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clflush)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(clflush))]
@@ -44,7 +44,7 @@ pub unsafe fn _mm_clflush(p: *const u8) {
/// globally visible before any load instruction which follows the fence in
/// program order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lfence)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lfence)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(lfence))]
@@ -60,7 +60,7 @@ pub unsafe fn _mm_lfence() {
/// memory fence instruction is globally visible before any memory instruction
/// which follows the fence in program order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mfence)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mfence)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(mfence))]
@@ -71,7 +71,7 @@ pub unsafe fn _mm_mfence() {
/// Adds packed 8-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddb))]
@@ -82,7 +82,7 @@ pub unsafe fn _mm_add_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed 16-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddw))]
@@ -93,7 +93,7 @@ pub unsafe fn _mm_add_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed 32-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddd))]
@@ -104,7 +104,7 @@ pub unsafe fn _mm_add_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed 64-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddq))]
@@ -115,7 +115,7 @@ pub unsafe fn _mm_add_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed 8-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddsb))]
@@ -126,7 +126,7 @@ pub unsafe fn _mm_adds_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed 16-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddsw))]
@@ -137,7 +137,7 @@ pub unsafe fn _mm_adds_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed unsigned 8-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddusb))]
@@ -148,7 +148,7 @@ pub unsafe fn _mm_adds_epu8(a: __m128i, b: __m128i) -> __m128i {
/// Adds packed unsigned 16-bit integers in `a` and `b` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(paddusw))]
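Saturating versus ordinary wrapping addition, per the descriptions above (a sketch, assuming an x86_64 target where SSE2 is baseline):

```rust
use std::arch::x86_64::*;

fn main() {
    unsafe {
        let a = _mm_set1_epi8(100);
        let b = _mm_set1_epi8(100);
        let sat = _mm_adds_epi8(a, b); // clamps at i8::MAX
        let wrap = _mm_add_epi8(a, b); // wraps modulo 256
        let (mut s, mut w) = ([0i8; 16], [0i8; 16]);
        _mm_storeu_si128(s.as_mut_ptr() as *mut __m128i, sat);
        _mm_storeu_si128(w.as_mut_ptr() as *mut __m128i, wrap);
        assert_eq!(s[0], 127); // saturated at i8::MAX
        assert_eq!(w[0], -56); // 100 + 100 = 200 wrapped into i8 range
    }
}
```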
@@ -159,7 +159,7 @@ pub unsafe fn _mm_adds_epu16(a: __m128i, b: __m128i) -> __m128i {
/// Averages packed unsigned 8-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pavgb))]
@@ -170,7 +170,7 @@ pub unsafe fn _mm_avg_epu8(a: __m128i, b: __m128i) -> __m128i {
/// Averages packed unsigned 16-bit integers in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pavgw))]
@@ -185,7 +185,7 @@ pub unsafe fn _mm_avg_epu16(a: __m128i, b: __m128i) -> __m128i {
/// intermediate signed 32-bit integers. Horizontally adds adjacent pairs of
/// intermediate 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_madd_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_madd_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmaddwd))]
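
// Sketch, not part of this diff: pmaddwd as a dot-product building block.
// Each output i32 lane is a[2i]*b[2i] + a[2i+1]*b[2i+1].
use std::arch::x86_64::*;

fn madd_sketch() {
    unsafe {
        let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
        let b = _mm_setr_epi16(10, 20, 30, 40, 50, 60, 70, 80);
        let sums = _mm_madd_epi16(a, b); // [1*10+2*20, 3*30+4*40, ...]
        let mut out = [0i32; 4];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, sums);
        assert_eq!(out, [50, 250, 610, 1130]);
    }
}
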
@@ -197,7 +197,7 @@ pub unsafe fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 16-bit integers in `a` and `b`, and returns the packed
/// maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmaxsw))]
@@ -211,7 +211,7 @@ pub unsafe fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed unsigned 8-bit integers in `a` and `b`, and returns the
/// packed maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmaxub))]
@@ -225,7 +225,7 @@ pub unsafe fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 16-bit integers in `a` and `b`, and returns the packed
/// minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pminsw))]
@@ -239,7 +239,7 @@ pub unsafe fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed unsigned 8-bit integers in `a` and `b`, and returns the
/// packed minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pminub))]
@@ -255,7 +255,7 @@ pub unsafe fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i {
/// The multiplication produces intermediate 32-bit integers; the high 16 bits
/// of each intermediate integer are returned.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmulhw))]
@@ -269,7 +269,7 @@ pub unsafe fn _mm_mulhi_epi16(a: __m128i, b: __m128i) -> __m128i {
/// The multiplication produces intermediate 32-bit integers; the high 16 bits
/// of each intermediate integer are returned.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epu16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmulhuw))]
@@ -283,7 +283,7 @@ pub unsafe fn _mm_mulhi_epu16(a: __m128i, b: __m128i) -> __m128i {
/// The multiplication produces intermediate 32-bit integers; the low 16 bits
/// of each intermediate integer are returned.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mullo_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmullw))]
@@ -297,7 +297,7 @@ pub unsafe fn _mm_mullo_epi16(a: __m128i, b: __m128i) -> __m128i {
///
/// Returns the unsigned 64-bit results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epu32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmuludq))]
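
// Sketch, not part of this diff: pmuludq multiplies only the even (0 and 2)
// 32-bit lanes, producing two full 64-bit products with no overflow.
use std::arch::x86_64::*;

fn mul_epu32_sketch() {
    unsafe {
        let a = _mm_setr_epi32(0x8000_0000u32 as i32, 7, 3, 9);
        let b = _mm_setr_epi32(4, 7, 5, 9);
        let prod = _mm_mul_epu32(a, b); // lanes 1 and 3 are ignored
        let mut out = [0u64; 2];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, prod);
        assert_eq!(out, [0x2_0000_0000, 15]);
    }
}
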
@@ -313,7 +313,7 @@ pub unsafe fn _mm_mul_epu32(a: __m128i, b: __m128i) -> __m128i {
/// two unsigned 16-bit integers, and packs these unsigned 16-bit integers in
/// the low 16 bits of the 64-bit elements returned.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_epu8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psadbw))]
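
// Sketch, not part of this diff: psadbw sums |a[i] - b[i]| over each 8-byte
// half, leaving one 16-bit total in the low bits of each 64-bit lane.
use std::arch::x86_64::*;

fn sad_sketch() {
    unsafe {
        let a = _mm_set1_epi8(10);
        let b = _mm_set1_epi8(7);
        let sad = _mm_sad_epu8(a, b);
        let mut out = [0u64; 2];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, sad);
        assert_eq!(out, [24, 24]); // 8 bytes * |10 - 7| per half
    }
}
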
@@ -324,7 +324,7 @@ pub unsafe fn _mm_sad_epu8(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed 8-bit integers in `b` from packed 8-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubb))]
@@ -335,7 +335,7 @@ pub unsafe fn _mm_sub_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed 16-bit integers in `b` from packed 16-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubw))]
@@ -346,7 +346,7 @@ pub unsafe fn _mm_sub_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed 32-bit integers in `b` from packed 32-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubd))]
@@ -357,7 +357,7 @@ pub unsafe fn _mm_sub_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed 64-bit integers in `b` from packed 64-bit integers in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubq))]
@@ -369,7 +369,7 @@ pub unsafe fn _mm_sub_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed 8-bit integers in `b` from packed 8-bit integers in `a`
/// using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_subs_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubsb))]
@@ -381,7 +381,7 @@ pub unsafe fn _mm_subs_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed 16-bit integers in `b` from packed 16-bit integers in `a`
/// using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_subs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubsw))]
@@ -393,7 +393,7 @@ pub unsafe fn _mm_subs_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed unsigned 8-bit integers in `b` from packed unsigned 8-bit
/// integers in `a` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_subs_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubusb))]
@@ -405,7 +405,7 @@ pub unsafe fn _mm_subs_epu8(a: __m128i, b: __m128i) -> __m128i {
/// Subtracts packed unsigned 16-bit integers in `b` from packed unsigned 16-bit
/// integers in `a` using saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_subs_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psubusw))]
@@ -416,14 +416,14 @@ pub unsafe fn _mm_subs_epu16(a: __m128i, b: __m128i) -> __m128i {
/// Shifts `a` left by `IMM8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pslldq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_si128<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
_mm_slli_si128_impl::<IMM8>(a)
}
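
// A minimal sketch, not part of this diff, of the const-generic immediate:
// the byte count is checked at compile time by `static_assert_uimm_bits!`.
use std::arch::x86_64::*;

fn byte_shift_sketch() {
    unsafe {
        let a = _mm_setr_epi32(1, 2, 3, 4);
        // Shift the whole 128-bit value left by 4 bytes (one i32 lane).
        let shifted = _mm_slli_si128::<4>(a);
        let mut out = [0i32; 4];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, shifted);
        assert_eq!(out, [0, 1, 2, 3]);
        // `_mm_slli_si128::<256>(a)` would be rejected at compile time.
    }
}
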
@@ -441,10 +441,10 @@ unsafe fn _mm_slli_si128_impl<const IMM8: i32>(a: __m128i) -> __m128i {
}
}
let zero = _mm_set1_epi8(0).as_i8x16();
- transmute::<i8x16, _>(simd_shuffle16!(
+ transmute::<i8x16, _>(simd_shuffle!(
zero,
a.as_i8x16(),
- <const IMM8: i32> [
+ [
mask(IMM8, 0),
mask(IMM8, 1),
mask(IMM8, 2),
@@ -467,47 +467,47 @@ unsafe fn _mm_slli_si128_impl<const IMM8: i32>(a: __m128i) -> __m128i {
/// Shifts `a` left by `IMM8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bslli_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bslli_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pslldq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_bslli_si128<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
_mm_slli_si128_impl::<IMM8>(a)
}
/// Shifts `a` right by `IMM8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bsrli_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bsrli_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrldq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_bsrli_si128<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
_mm_srli_si128_impl::<IMM8>(a)
}
/// Shifts packed 16-bit integers in `a` left by `IMM8` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psllw, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pslliw(a.as_i16x8(), IMM8))
}
/// Shifts packed 16-bit integers in `a` left by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sll_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psllw))]
@@ -518,21 +518,21 @@ pub unsafe fn _mm_sll_epi16(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 32-bit integers in `a` left by `IMM8` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pslld, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psllid(a.as_i32x4(), IMM8))
}
/// Shifts packed 32-bit integers in `a` left by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sll_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pslld))]
@@ -543,21 +543,21 @@ pub unsafe fn _mm_sll_epi32(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 64-bit integers in `a` left by `IMM8` while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psllq, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_slli_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pslliq(a.as_i64x2(), IMM8))
}
/// Shifts packed 64-bit integers in `a` left by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sll_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psllq))]
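
// Sketch, not part of this diff, contrasting the two shift flavors: `slli`
// takes a compile-time immediate, while `sll` reads the count from the low
// 64 bits of a vector at runtime. `n` here is an illustrative runtime value.
use std::arch::x86_64::*;

fn shift_count_sketch(n: i32) {
    unsafe {
        let a = _mm_set1_epi16(3);
        let by_imm = _mm_slli_epi16::<2>(a); // count fixed when compiling
        let by_vec = _mm_sll_epi16(a, _mm_cvtsi32_si128(n)); // count chosen at runtime
        let _ = (by_imm, by_vec);
    }
}
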
@@ -569,21 +569,21 @@ pub unsafe fn _mm_sll_epi64(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 16-bit integers in `a` right by `IMM8` while shifting in sign
/// bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psraw, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srai_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psraiw(a.as_i16x8(), IMM8))
}
/// Shifts packed 16-bit integers in `a` right by `count` while shifting in sign
/// bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sra_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psraw))]
@@ -595,21 +595,21 @@ pub unsafe fn _mm_sra_epi16(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 32-bit integers in `a` right by `IMM8` while shifting in sign
/// bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrad, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srai_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psraid(a.as_i32x4(), IMM8))
}
/// Shifts packed 32-bit integers in `a` right by `count` while shifting in sign
/// bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sra_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrad))]
@@ -620,14 +620,14 @@ pub unsafe fn _mm_sra_epi32(a: __m128i, count: __m128i) -> __m128i {
/// Shifts `a` right by `IMM8` bytes while shifting in zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrldq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_si128<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
_mm_srli_si128_impl::<IMM8>(a)
}
@@ -644,10 +644,10 @@ unsafe fn _mm_srli_si128_impl<const IMM8: i32>(a: __m128i) -> __m128i {
}
}
let zero = _mm_set1_epi8(0).as_i8x16();
- let x: i8x16 = simd_shuffle16!(
+ let x: i8x16 = simd_shuffle!(
a.as_i8x16(),
zero,
- <const IMM8: i32> [
+ [
mask(IMM8, 0),
mask(IMM8, 1),
mask(IMM8, 2),
@@ -672,21 +672,21 @@ unsafe fn _mm_srli_si128_impl<const IMM8: i32>(a: __m128i) -> __m128i {
/// Shifts packed 16-bit integers in `a` right by `IMM8` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrlw, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psrliw(a.as_i16x8(), IMM8))
}
/// Shifts packed 16-bit integers in `a` right by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srl_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrlw))]
@@ -698,21 +698,21 @@ pub unsafe fn _mm_srl_epi16(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 32-bit integers in `a` right by `IMM8` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrld, IMM8 = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psrlid(a.as_i32x4(), IMM8))
}
/// Shifts packed 32-bit integers in `a` right by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srl_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrld))]
@@ -724,21 +724,21 @@ pub unsafe fn _mm_srl_epi32(a: __m128i, count: __m128i) -> __m128i {
/// Shifts packed 64-bit integers in `a` right by `IMM8` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrlq, IMM8 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_srli_epi64<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(psrliq(a.as_i64x2(), IMM8))
}
/// Shifts packed 64-bit integers in `a` right by `count` while shifting in
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srl_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(psrlq))]
@@ -750,7 +750,7 @@ pub unsafe fn _mm_srl_epi64(a: __m128i, count: __m128i) -> __m128i {
/// Computes the bitwise AND of 128 bits (representing integer data) in `a` and
/// `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(andps))]
@@ -762,7 +762,7 @@ pub unsafe fn _mm_and_si128(a: __m128i, b: __m128i) -> __m128i {
/// Computes the bitwise NOT of 128 bits (representing integer data) in `a` and
/// then AND with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(andnps))]
@@ -774,7 +774,7 @@ pub unsafe fn _mm_andnot_si128(a: __m128i, b: __m128i) -> __m128i {
/// Computes the bitwise OR of 128 bits (representing integer data) in `a` and
/// `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_or_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(orps))]
@@ -786,7 +786,7 @@ pub unsafe fn _mm_or_si128(a: __m128i, b: __m128i) -> __m128i {
/// Computes the bitwise XOR of 128 bits (representing integer data) in `a` and
/// `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(xorps))]
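
// Sketch, not part of this diff: the classic branchless select built from
// these three logical ops, computing (mask & a) | (!mask & b) per bit.
use std::arch::x86_64::*;

unsafe fn select(mask: __m128i, a: __m128i, b: __m128i) -> __m128i {
    // `_mm_andnot_si128(mask, b)` computes `!mask & b`, so no extra NOT is needed.
    _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b))
}
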
@@ -797,7 +797,7 @@ pub unsafe fn _mm_xor_si128(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 8-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpeqb))]
@@ -808,7 +808,7 @@ pub unsafe fn _mm_cmpeq_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 16-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpeqw))]
@@ -819,7 +819,7 @@ pub unsafe fn _mm_cmpeq_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 32-bit integers in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpeqd))]
@@ -830,7 +830,7 @@ pub unsafe fn _mm_cmpeq_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 8-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpgtb))]
@@ -841,7 +841,7 @@ pub unsafe fn _mm_cmpgt_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 16-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpgtw))]
@@ -852,7 +852,7 @@ pub unsafe fn _mm_cmpgt_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 32-bit integers in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpgtd))]
@@ -863,7 +863,7 @@ pub unsafe fn _mm_cmpgt_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 8-bit integers in `a` and `b` for less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpgtb))]
@@ -874,7 +874,7 @@ pub unsafe fn _mm_cmplt_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 16-bit integers in `a` and `b` for less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpgtw))]
@@ -885,7 +885,7 @@ pub unsafe fn _mm_cmplt_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 32-bit integers in `a` and `b` for less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pcmpgtd))]
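
// Sketch, not part of this diff: comparisons yield all-ones (-1) or all-zero
// lanes, ready for use as select masks. Note the `pcmpgtb`/`pcmpgtw`/`pcmpgtd`
// assertions above: the less-than forms compile to greater-than with the
// operands swapped.
use std::arch::x86_64::*;

fn compare_sketch() {
    unsafe {
        let a = _mm_setr_epi32(1, 5, 3, 7);
        let b = _mm_setr_epi32(2, 4, 3, 8);
        let lt = _mm_cmplt_epi32(a, b);
        let mut out = [0i32; 4];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, lt);
        assert_eq!(out, [-1, 0, 0, -1]);
    }
}
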
@@ -897,20 +897,20 @@ pub unsafe fn _mm_cmplt_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Converts the lower two packed 32-bit integers in `a` to packed
/// double-precision (64-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtdq2pd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi32_pd(a: __m128i) -> __m128d {
let a = a.as_i32x4();
- simd_cast::<i32x2, __m128d>(simd_shuffle2!(a, a, [0, 1]))
+ simd_cast::<i32x2, __m128d>(simd_shuffle!(a, a, [0, 1]))
}
/// Returns `a` with its lower element replaced by `b` after converting it to
/// an `f64`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsi2sd))]
@@ -922,7 +922,7 @@ pub unsafe fn _mm_cvtsi32_sd(a: __m128d, b: i32) -> __m128d {
/// Converts packed 32-bit integers in `a` to packed single-precision (32-bit)
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_ps)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtdq2ps))]
@@ -934,7 +934,7 @@ pub unsafe fn _mm_cvtepi32_ps(a: __m128i) -> __m128 {
/// Converts packed single-precision (32-bit) floating-point elements in `a`
/// to packed 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtps2dq))]
@@ -946,7 +946,7 @@ pub unsafe fn _mm_cvtps_epi32(a: __m128) -> __m128i {
/// Returns a vector whose lowest element is `a` and all higher elements are
/// `0`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(movd))]
@@ -957,7 +957,7 @@ pub unsafe fn _mm_cvtsi32_si128(a: i32) -> __m128i {
/// Returns the lowest element of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movd))]
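
// Sketch, not part of this diff: the two `movd` directions compose to the
// identity on the low 32 bits; the upper lanes are zeroed on the way in.
use std::arch::x86_64::*;

fn movd_roundtrip(x: i32) -> i32 {
    unsafe { _mm_cvtsi128_si32(_mm_cvtsi32_si128(x)) } // always returns `x`
}
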
@@ -969,7 +969,7 @@ pub unsafe fn _mm_cvtsi128_si32(a: __m128i) -> i32 {
/// Sets packed 64-bit integers with the supplied values, from highest to
/// lowest.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_epi64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64x)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -980,7 +980,7 @@ pub unsafe fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i {
/// Sets packed 32-bit integers with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi32)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -991,7 +991,7 @@ pub unsafe fn _mm_set_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i {
/// Sets packed 16-bit integers with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi16)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1011,7 +1011,7 @@ pub unsafe fn _mm_set_epi16(
/// Sets packed 8-bit integers with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi8)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1042,7 +1042,7 @@ pub unsafe fn _mm_set_epi8(
/// Broadcasts 64-bit integer `a` to all elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64x)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1053,7 +1053,7 @@ pub unsafe fn _mm_set1_epi64x(a: i64) -> __m128i {
/// Broadcasts 32-bit integer `a` to all elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi32)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1064,7 +1064,7 @@ pub unsafe fn _mm_set1_epi32(a: i32) -> __m128i {
/// Broadcasts 16-bit integer `a` to all elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi16)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1075,7 +1075,7 @@ pub unsafe fn _mm_set1_epi16(a: i16) -> __m128i {
/// Broadcasts 8-bit integer `a` to all elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi8)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1086,7 +1086,7 @@ pub unsafe fn _mm_set1_epi8(a: i8) -> __m128i {
/// Sets packed 32-bit integers with the supplied values in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi32)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1097,7 +1097,7 @@ pub unsafe fn _mm_setr_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i {
/// Sets packed 16-bit integers with the supplied values in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi16)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
@@ -1117,7 +1117,7 @@ pub unsafe fn _mm_setr_epi16(
/// Sets packed 8-bit integers with the supplied values in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi8)
#[inline]
#[target_feature(enable = "sse2")]
// no particular instruction to test
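
// Sketch, not part of this diff: `set` takes arguments from the highest
// element down to the lowest, `setr` the reverse, so these two calls build
// the same vector.
use std::arch::x86_64::*;

fn set_order_sketch() {
    unsafe {
        let hi_first = _mm_set_epi32(3, 2, 1, 0);
        let lo_first = _mm_setr_epi32(0, 1, 2, 3);
        let eq = _mm_cmpeq_epi32(hi_first, lo_first);
        assert_eq!(_mm_movemask_epi8(eq), 0xFFFF); // every byte matched
    }
}
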
@@ -1148,7 +1148,7 @@ pub unsafe fn _mm_setr_epi8(
/// Returns a vector with all elements set to zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(xorps))]
@@ -1159,7 +1159,7 @@ pub unsafe fn _mm_setzero_si128() -> __m128i {
/// Loads a 64-bit integer from memory into the first element of the returned
/// vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_epi64)
#[inline]
#[target_feature(enable = "sse2")]
// FIXME movsd on windows
@@ -1181,7 +1181,7 @@ pub unsafe fn _mm_loadl_epi64(mem_addr: *const __m128i) -> __m128i {
///
/// `mem_addr` must be aligned on a 16-byte boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -1194,7 +1194,7 @@ pub unsafe fn _mm_load_si128(mem_addr: *const __m128i) -> __m128i {
///
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movups))]
@@ -1218,7 +1218,7 @@ pub unsafe fn _mm_loadu_si128(mem_addr: *const __m128i) -> __m128i {
/// `mem_addr` should correspond to a 128-bit memory location and does not need
/// to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskmoveu_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(maskmovdqu))]
@@ -1231,7 +1231,7 @@ pub unsafe fn _mm_maskmoveu_si128(a: __m128i, mask: __m128i, mem_addr: *mut i8)
///
/// `mem_addr` must be aligned on a 16-byte boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -1244,7 +1244,7 @@ pub unsafe fn _mm_store_si128(mem_addr: *mut __m128i, a: __m128i) {
///
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movups))] // FIXME movdqu expected
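
// Sketch, not part of this diff: `loadu`/`storeu` round-trip through a plain
// array with no alignment requirement; `load`/`store` would instead demand a
// 16-byte-aligned address.
use std::arch::x86_64::*;

fn unaligned_roundtrip_sketch() {
    unsafe {
        let src = [1i32, 2, 3, 4];
        let v = _mm_loadu_si128(src.as_ptr() as *const __m128i);
        let mut dst = [0i32; 4];
        _mm_storeu_si128(dst.as_mut_ptr() as *mut __m128i, v);
        assert_eq!(src, dst);
    }
}
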
@@ -1257,7 +1257,7 @@ pub unsafe fn _mm_storeu_si128(mem_addr: *mut __m128i, a: __m128i) {
///
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storel_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_epi64)
#[inline]
#[target_feature(enable = "sse2")]
// FIXME mov on windows, movlps on i686
@@ -1279,7 +1279,7 @@ pub unsafe fn _mm_storel_epi64(mem_addr: *mut __m128i, a: __m128i) {
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movntps))] // FIXME movntdq
@@ -1292,7 +1292,7 @@ pub unsafe fn _mm_stream_si128(mem_addr: *mut __m128i, a: __m128i) {
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movnti))]
@@ -1304,7 +1304,7 @@ pub unsafe fn _mm_stream_si32(mem_addr: *mut i32, a: i32) {
/// Returns a vector where the low element is extracted from `a` and its upper
/// element is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_epi64)
#[inline]
#[target_feature(enable = "sse2")]
// FIXME movd on windows, movd on i686
@@ -1312,14 +1312,14 @@ pub unsafe fn _mm_stream_si32(mem_addr: *mut i32, a: i32) {
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_move_epi64(a: __m128i) -> __m128i {
let zero = _mm_setzero_si128();
- let r: i64x2 = simd_shuffle2!(a.as_i64x2(), zero.as_i64x2(), [0, 2]);
+ let r: i64x2 = simd_shuffle!(a.as_i64x2(), zero.as_i64x2(), [0, 2]);
transmute(r)
}
/// Converts packed 16-bit integers from `a` and `b` to packed 8-bit integers
/// using signed saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_packs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(packsswb))]
@@ -1331,7 +1331,7 @@ pub unsafe fn _mm_packs_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Converts packed 32-bit integers from `a` and `b` to packed 16-bit integers
/// using signed saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_packs_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(packssdw))]
@@ -1343,7 +1343,7 @@ pub unsafe fn _mm_packs_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Converts packed 16-bit integers from `a` and `b` to packed 8-bit integers
/// using unsigned saturation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_packus_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(packuswb))]
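
// Sketch, not part of this diff: unsigned saturation clamps each i16 to
// 0..=255 before narrowing, a common last step in pixel arithmetic.
use std::arch::x86_64::*;

fn packus_sketch() {
    unsafe {
        let a = _mm_setr_epi16(-5, 0, 128, 300, 1, 2, 3, 4);
        let b = _mm_set1_epi16(1000);
        let packed = _mm_packus_epi16(a, b); // 16 u8 lanes: 8 from a, then 8 from b
        let mut out = [0u8; 16];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, packed);
        assert_eq!(out[..4], [0, 0, 128, 255]); // -5 -> 0, 300 -> 255
    }
}
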
@@ -1354,33 +1354,33 @@ pub unsafe fn _mm_packus_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Returns the `IMM8` element of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pextrw, IMM8 = 7))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_extract_epi16<const IMM8: i32>(a: __m128i) -> i32 {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
simd_extract::<_, u16>(a.as_u16x8(), IMM8 as u32) as i32
}
/// Returns a new vector where the `IMM8` element of `a` is replaced with `i`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pinsrw, IMM8 = 7))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_insert_epi16<const IMM8: i32>(a: __m128i, i: i32) -> __m128i {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
transmute(simd_insert(a.as_i16x8(), IMM8 as u32, i as i16))
}
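
// Sketch, not part of this diff: the lane index is a 3-bit const generic
// (eight u16 lanes), enforced by the `static_assert_uimm_bits!(IMM8, 3)` above.
use std::arch::x86_64::*;

fn lane_sketch() {
    unsafe {
        let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
        assert_eq!(_mm_extract_epi16::<5>(a), 5);
        let b = _mm_insert_epi16::<5>(a, -1);
        assert_eq!(_mm_extract_epi16::<5>(b), 0xFFFF); // extracted as zero-extended u16
    }
}
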
/// Returns a mask of the most significant bit of each element in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pmovmskb))]
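
// Sketch, not part of this diff: `pcmpeqb` + `pmovmskb` is the standard
// "find a byte among 16" idiom; each bit of the mask maps to one byte lane.
use std::arch::x86_64::*;

fn find_byte(haystack: &[u8; 16], needle: u8) -> Option<usize> {
    unsafe {
        let v = _mm_loadu_si128(haystack.as_ptr() as *const __m128i);
        let eq = _mm_cmpeq_epi8(v, _mm_set1_epi8(needle as i8));
        let mask = _mm_movemask_epi8(eq) as u32;
        if mask == 0 { None } else { Some(mask.trailing_zeros() as usize) }
    }
}
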
@@ -1393,19 +1393,19 @@ pub unsafe fn _mm_movemask_epi8(a: __m128i) -> i32 {
/// Shuffles 32-bit integers in `a` using the control in `IMM8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pshufd, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_shuffle_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i32x4();
- let x: i32x4 = simd_shuffle4!(
+ let x: i32x4 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
IMM8 as u32 & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -1421,19 +1421,19 @@ pub unsafe fn _mm_shuffle_epi32<const IMM8: i32>(a: __m128i) -> __m128i {
/// Puts the results in the high 64 bits of the returned vector, with the low 64
/// bits being copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shufflehi_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pshufhw, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_shufflehi_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x8();
- let x: i16x8 = simd_shuffle8!(
+ let x: i16x8 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
0,
1,
2,
@@ -1453,19 +1453,19 @@ pub unsafe fn _mm_shufflehi_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
/// Puts the results in the low 64 bits of the returned vector, with the high 64
/// bits being copied from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shufflelo_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(pshuflw, IMM8 = 9))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_shufflelo_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
let a = a.as_i16x8();
- let x: i16x8 = simd_shuffle8!(
+ let x: i16x8 = simd_shuffle!(
a,
a,
- <const IMM8: i32> [
+ [
IMM8 as u32 & 0b11,
(IMM8 as u32 >> 2) & 0b11,
(IMM8 as u32 >> 4) & 0b11,
@@ -1481,13 +1481,13 @@ pub unsafe fn _mm_shufflelo_epi16<const IMM8: i32>(a: __m128i) -> __m128i {
/// Unpacks and interleaves 8-bit integers from the high half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(punpckhbw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpackhi_epi8(a: __m128i, b: __m128i) -> __m128i {
- transmute::<i8x16, _>(simd_shuffle16!(
+ transmute::<i8x16, _>(simd_shuffle!(
a.as_i8x16(),
b.as_i8x16(),
[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31],
@@ -1496,47 +1496,47 @@ pub unsafe fn _mm_unpackhi_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Unpacks and interleaves 16-bit integers from the high half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(punpckhwd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpackhi_epi16(a: __m128i, b: __m128i) -> __m128i {
- let x = simd_shuffle8!(a.as_i16x8(), b.as_i16x8(), [4, 12, 5, 13, 6, 14, 7, 15]);
+ let x = simd_shuffle!(a.as_i16x8(), b.as_i16x8(), [4, 12, 5, 13, 6, 14, 7, 15]);
transmute::<i16x8, _>(x)
}
/// Unpacks and interleaves 32-bit integers from the high half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(unpckhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpackhi_epi32(a: __m128i, b: __m128i) -> __m128i {
- transmute::<i32x4, _>(simd_shuffle4!(a.as_i32x4(), b.as_i32x4(), [2, 6, 3, 7]))
+ transmute::<i32x4, _>(simd_shuffle!(a.as_i32x4(), b.as_i32x4(), [2, 6, 3, 7]))
}
/// Unpacks and interleaves 64-bit integers from the high half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(unpckhpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpackhi_epi64(a: __m128i, b: __m128i) -> __m128i {
- transmute::<i64x2, _>(simd_shuffle2!(a.as_i64x2(), b.as_i64x2(), [1, 3]))
+ transmute::<i64x2, _>(simd_shuffle!(a.as_i64x2(), b.as_i64x2(), [1, 3]))
}
/// Unpacks and interleaves 8-bit integers from the low half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi8)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(punpcklbw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpacklo_epi8(a: __m128i, b: __m128i) -> __m128i {
- transmute::<i8x16, _>(simd_shuffle16!(
+ transmute::<i8x16, _>(simd_shuffle!(
a.as_i8x16(),
b.as_i8x16(),
[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23],
@@ -1545,42 +1545,42 @@ pub unsafe fn _mm_unpacklo_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Unpacks and interleaves 16-bit integers from the low half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi16)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(punpcklwd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpacklo_epi16(a: __m128i, b: __m128i) -> __m128i {
- let x = simd_shuffle8!(a.as_i16x8(), b.as_i16x8(), [0, 8, 1, 9, 2, 10, 3, 11]);
+ let x = simd_shuffle!(a.as_i16x8(), b.as_i16x8(), [0, 8, 1, 9, 2, 10, 3, 11]);
transmute::<i16x8, _>(x)
}
/// Unpacks and interleaves 32-bit integers from the low half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(unpcklps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpacklo_epi32(a: __m128i, b: __m128i) -> __m128i {
- transmute::<i32x4, _>(simd_shuffle4!(a.as_i32x4(), b.as_i32x4(), [0, 4, 1, 5]))
+ transmute::<i32x4, _>(simd_shuffle!(a.as_i32x4(), b.as_i32x4(), [0, 4, 1, 5]))
}
/// Unpacks and interleaves 64-bit integers from the low half of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movlhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpacklo_epi64(a: __m128i, b: __m128i) -> __m128i {
- transmute::<i64x2, _>(simd_shuffle2!(a.as_i64x2(), b.as_i64x2(), [0, 2]))
+ transmute::<i64x2, _>(simd_shuffle!(a.as_i64x2(), b.as_i64x2(), [0, 2]))
}
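
// Sketch, not part of this diff: together the lo/hi unpacks form a full
// "zip" of two vectors, shown here on 32-bit lanes.
use std::arch::x86_64::*;

fn zip_sketch() {
    unsafe {
        let a = _mm_setr_epi32(0, 1, 2, 3);
        let b = _mm_setr_epi32(10, 11, 12, 13);
        let lo = _mm_unpacklo_epi32(a, b); // [0, 10, 1, 11]
        let hi = _mm_unpackhi_epi32(a, b); // [2, 12, 3, 13]
        let mut out = [0i32; 4];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, lo);
        assert_eq!(out, [0, 10, 1, 11]);
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, hi);
        assert_eq!(out, [2, 12, 3, 13]);
    }
}
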
/// Returns a new vector with the low element of `a` replaced by the sum of the
/// low elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(addsd))]
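
// Sketch, not part of this diff: the `_sd` forms touch only the low f64 lane;
// the upper lane passes through unchanged from `a`.
use std::arch::x86_64::*;

fn add_sd_sketch() {
    unsafe {
        let a = _mm_setr_pd(1.0, 100.0);
        let b = _mm_setr_pd(2.0, 999.0); // upper lane of `b` is ignored
        let r = _mm_add_sd(a, b);
        let mut out = [0.0f64; 2];
        _mm_storeu_pd(out.as_mut_ptr(), r);
        assert_eq!(out, [3.0, 100.0]);
    }
}
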
@@ -1592,7 +1592,7 @@ pub unsafe fn _mm_add_sd(a: __m128d, b: __m128d) -> __m128d {
/// Adds packed double-precision (64-bit) floating-point elements in `a` and
/// `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(addpd))]
@@ -1604,7 +1604,7 @@ pub unsafe fn _mm_add_pd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the result of
/// dividing the lower element of `a` by the lower element of `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(divsd))]
@@ -1616,7 +1616,7 @@ pub unsafe fn _mm_div_sd(a: __m128d, b: __m128d) -> __m128d {
/// Divides packed double-precision (64-bit) floating-point elements in `a` by
/// packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(divpd))]
@@ -1628,7 +1628,7 @@ pub unsafe fn _mm_div_pd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the maximum
/// of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(maxsd))]
@@ -1640,7 +1640,7 @@ pub unsafe fn _mm_max_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the maximum values from corresponding elements in
/// `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(maxpd))]
@@ -1652,7 +1652,7 @@ pub unsafe fn _mm_max_pd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the minimum
/// of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(minsd))]
@@ -1664,7 +1664,7 @@ pub unsafe fn _mm_min_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the minimum values from corresponding elements in
/// `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(minpd))]
@@ -1676,7 +1676,7 @@ pub unsafe fn _mm_min_pd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by multiplying the
/// low elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(mulsd))]
@@ -1688,7 +1688,7 @@ pub unsafe fn _mm_mul_sd(a: __m128d, b: __m128d) -> __m128d {
/// Multiplies packed double-precision (64-bit) floating-point elements in `a`
/// and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(mulpd))]
@@ -1700,7 +1700,7 @@ pub unsafe fn _mm_mul_pd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the square
/// root of the lower element of `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(sqrtsd))]
@@ -1711,7 +1711,7 @@ pub unsafe fn _mm_sqrt_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the square root of each of the values in `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(sqrtpd))]
@@ -1723,7 +1723,7 @@ pub unsafe fn _mm_sqrt_pd(a: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by subtracting the
/// low element of `b` from the low element of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(subsd))]
@@ -1735,7 +1735,7 @@ pub unsafe fn _mm_sub_sd(a: __m128d, b: __m128d) -> __m128d {
/// Subtracts packed double-precision (64-bit) floating-point elements in `b`
/// from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(subpd))]
@@ -1747,7 +1747,7 @@ pub unsafe fn _mm_sub_pd(a: __m128d, b: __m128d) -> __m128d {
/// Computes the bitwise AND of packed double-precision (64-bit) floating-point
/// elements in `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(andps))]
@@ -1760,7 +1760,7 @@ pub unsafe fn _mm_and_pd(a: __m128d, b: __m128d) -> __m128d {
/// Computes the bitwise NOT of `a` and then ANDs the result with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(andnps))]
@@ -1773,7 +1773,7 @@ pub unsafe fn _mm_andnot_pd(a: __m128d, b: __m128d) -> __m128d {
/// Computes the bitwise OR of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_or_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(orps))]
@@ -1786,7 +1786,7 @@ pub unsafe fn _mm_or_pd(a: __m128d, b: __m128d) -> __m128d {
/// Computes the bitwise XOR of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(xorps))]
@@ -1800,7 +1800,7 @@ pub unsafe fn _mm_xor_pd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the equality
/// comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpeqsd))]
@@ -1812,7 +1812,7 @@ pub unsafe fn _mm_cmpeq_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the less-than
/// comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpltsd))]
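The `cmp*_sd` family returns an all-ones/all-zeros bit mask in the low lane rather than a boolean; a sketch of that convention (illustrative name, x86_64 assumed):

use std::arch::x86_64::*;

unsafe fn demo_cmplt_sd() {
    let a = _mm_setr_pd(1.0, 7.0);
    let b = _mm_setr_pd(2.0, 7.0);
    let mut out = [0.0f64; 2];
    _mm_storeu_pd(out.as_mut_ptr(), _mm_cmplt_sd(a, b));
    // 1.0 < 2.0, so the low lane is all ones (a NaN bit pattern when
    // viewed as f64); the high lane is copied from `a`.
    assert_eq!(out[0].to_bits(), u64::MAX);
    assert_eq!(out[1], 7.0);
}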
@@ -1824,7 +1824,7 @@ pub unsafe fn _mm_cmplt_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// less-than-or-equal comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmplesd))]
@@ -1836,7 +1836,7 @@ pub unsafe fn _mm_cmple_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// greater-than comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpltsd))]
@@ -1848,7 +1848,7 @@ pub unsafe fn _mm_cmpgt_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// greater-than-or-equal comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmplesd))]
@@ -1862,7 +1862,7 @@ pub unsafe fn _mm_cmpge_sd(a: __m128d, b: __m128d) -> __m128d {
/// neither is equal to `NaN` then `0xFFFFFFFFFFFFFFFF` is used and `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpord_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpordsd))]
@@ -1875,7 +1875,7 @@ pub unsafe fn _mm_cmpord_sd(a: __m128d, b: __m128d) -> __m128d {
/// comparing both of the lower elements of `a` and `b` to `NaN`. If either is
/// equal to `NaN` then `0xFFFFFFFFFFFFFFFF` is used and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpunord_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpunordsd))]
@@ -1887,7 +1887,7 @@ pub unsafe fn _mm_cmpunord_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the not-equal
/// comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpneqsd))]
@@ -1899,7 +1899,7 @@ pub unsafe fn _mm_cmpneq_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// not-less-than comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnlt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnltsd))]
@@ -1911,7 +1911,7 @@ pub unsafe fn _mm_cmpnlt_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// not-less-than-or-equal comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnle_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnlesd))]
@@ -1923,7 +1923,7 @@ pub unsafe fn _mm_cmpnle_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// not-greater-than comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpngt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnltsd))]
@@ -1935,7 +1935,7 @@ pub unsafe fn _mm_cmpngt_sd(a: __m128d, b: __m128d) -> __m128d {
/// Returns a new vector with the low element of `a` replaced by the
/// not-greater-than-or-equal comparison of the lower elements of `a` and `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnge_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnlesd))]
@@ -1946,7 +1946,7 @@ pub unsafe fn _mm_cmpnge_sd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpeqpd))]
@@ -1957,7 +1957,7 @@ pub unsafe fn _mm_cmpeq_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpltpd))]
@@ -1968,7 +1968,7 @@ pub unsafe fn _mm_cmplt_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for less-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmplepd))]
@@ -1979,7 +1979,7 @@ pub unsafe fn _mm_cmple_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpltpd))]
@@ -1990,7 +1990,7 @@ pub unsafe fn _mm_cmpgt_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for greater-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmplepd))]
@@ -2001,7 +2001,7 @@ pub unsafe fn _mm_cmpge_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` to see if neither is `NaN`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpord_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpordpd))]
@@ -2012,7 +2012,7 @@ pub unsafe fn _mm_cmpord_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` to see if either is `NaN`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpunord_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpunordpd))]
@@ -2023,7 +2023,7 @@ pub unsafe fn _mm_cmpunord_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for not-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpneqpd))]
@@ -2034,7 +2034,7 @@ pub unsafe fn _mm_cmpneq_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for not-less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnlt_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnltpd))]
@@ -2045,7 +2045,7 @@ pub unsafe fn _mm_cmpnlt_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for not-less-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnle_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnlepd))]
@@ -2056,7 +2056,7 @@ pub unsafe fn _mm_cmpnle_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for not-greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpngt_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnltpd))]
@@ -2068,7 +2068,7 @@ pub unsafe fn _mm_cmpngt_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares corresponding elements in `a` and `b` for
/// not-greater-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnge_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cmpnlepd))]
@@ -2079,7 +2079,7 @@ pub unsafe fn _mm_cmpnge_pd(a: __m128d, b: __m128d) -> __m128d {
/// Compares the lower element of `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comieq_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(comisd))]
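Unlike the `cmp*_sd` mask variants above, the `comi*`/`ucomi*` intrinsics return a plain `i32` flag; `comisd` raises an invalid-operation exception on any NaN operand, while `ucomisd` stays quiet for QNaN. A minimal sketch:

use std::arch::x86_64::*;

unsafe fn demo_comieq_sd() {
    let a = _mm_set_sd(4.0);
    let b = _mm_set_sd(4.0);
    // Returns 1 when the low lanes compare equal, 0 otherwise.
    assert_eq!(_mm_comieq_sd(a, b), 1);
}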
@@ -2090,7 +2090,7 @@ pub unsafe fn _mm_comieq_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comilt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(comisd))]
@@ -2101,7 +2101,7 @@ pub unsafe fn _mm_comilt_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for less-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comile_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(comisd))]
@@ -2112,7 +2112,7 @@ pub unsafe fn _mm_comile_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comigt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(comisd))]
@@ -2123,7 +2123,7 @@ pub unsafe fn _mm_comigt_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for greater-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comige_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(comisd))]
@@ -2134,7 +2134,7 @@ pub unsafe fn _mm_comige_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for not-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comineq_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(comisd))]
@@ -2145,7 +2145,7 @@ pub unsafe fn _mm_comineq_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for equality.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomieq_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomieq_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(ucomisd))]
@@ -2156,7 +2156,7 @@ pub unsafe fn _mm_ucomieq_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for less-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomilt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomilt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(ucomisd))]
@@ -2167,7 +2167,7 @@ pub unsafe fn _mm_ucomilt_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for less-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomile_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomile_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(ucomisd))]
@@ -2178,7 +2178,7 @@ pub unsafe fn _mm_ucomile_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for greater-than.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomigt_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomigt_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(ucomisd))]
@@ -2189,7 +2189,7 @@ pub unsafe fn _mm_ucomigt_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for greater-than-or-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomige_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomige_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(ucomisd))]
@@ -2200,7 +2200,7 @@ pub unsafe fn _mm_ucomige_sd(a: __m128d, b: __m128d) -> i32 {
/// Compares the lower element of `a` and `b` for not-equal.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomineq_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ucomineq_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(ucomisd))]
@@ -2212,7 +2212,7 @@ pub unsafe fn _mm_ucomineq_sd(a: __m128d, b: __m128d) -> i32 {
/// Converts packed double-precision (64-bit) floating-point elements in `a` to
/// packed single-precision (32-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_ps)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtpd2ps))]
@@ -2225,7 +2225,7 @@ pub unsafe fn _mm_cvtpd_ps(a: __m128d) -> __m128 {
/// packed double-precision (64-bit) floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtps2pd))]
@@ -2237,7 +2237,7 @@ pub unsafe fn _mm_cvtps_pd(a: __m128) -> __m128d {
/// Converts packed double-precision (64-bit) floating-point elements in `a` to
/// packed 32-bit integers.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtpd2dq))]
@@ -2249,7 +2249,7 @@ pub unsafe fn _mm_cvtpd_epi32(a: __m128d) -> __m128i {
/// Converts the lower double-precision (64-bit) floating-point element in `a` to
/// a 32-bit integer.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsd2si))]
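Worth noting alongside these link fixes: `cvtsd2si` rounds under the current MXCSR rounding mode (round-to-nearest-even by default), whereas the `cvtt*` variants further down truncate. A sketch:

use std::arch::x86_64::*;

unsafe fn demo_cvtsd_si32() {
    // Round-to-nearest-even: 2.5 -> 2, 3.5 -> 4.
    assert_eq!(_mm_cvtsd_si32(_mm_set_sd(2.5)), 2);
    assert_eq!(_mm_cvtsd_si32(_mm_set_sd(3.5)), 4);
}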
@@ -2263,7 +2263,7 @@ pub unsafe fn _mm_cvtsd_si32(a: __m128d) -> i32 {
/// the lower element of the return value, and copies the upper element from `a`
/// to the upper element of the return value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_ss)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsd2ss))]
@@ -2274,7 +2274,7 @@ pub unsafe fn _mm_cvtsd_ss(a: __m128, b: __m128d) -> __m128 {
/// Returns the lower double-precision (64-bit) floating-point element of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_f64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_f64)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2287,7 +2287,7 @@ pub unsafe fn _mm_cvtsd_f64(a: __m128d) -> f64 {
/// the lower element of the return value, and copies the upper element from `a`
/// to the upper element of the return value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtss2sd))]
@@ -2299,7 +2299,7 @@ pub unsafe fn _mm_cvtss_sd(a: __m128d, b: __m128) -> __m128d {
/// Converts packed double-precision (64-bit) floating-point elements in `a` to
/// packed 32-bit integers with truncation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttpd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvttpd2dq))]
@@ -2311,7 +2311,7 @@ pub unsafe fn _mm_cvttpd_epi32(a: __m128d) -> __m128i {
/// Converts the lower double-precision (64-bit) floating-point element in `a`
/// to a 32-bit integer with truncation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvttsd2si))]
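And the truncating counterpart, which always rounds toward zero regardless of MXCSR (sketch, same assumptions):

use std::arch::x86_64::*;

unsafe fn demo_cvttsd_si32() {
    assert_eq!(_mm_cvttsd_si32(_mm_set_sd(2.9)), 2);
    assert_eq!(_mm_cvttsd_si32(_mm_set_sd(-2.9)), -2);
}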
@@ -2323,7 +2323,7 @@ pub unsafe fn _mm_cvttsd_si32(a: __m128d) -> i32 {
/// Converts packed single-precision (32-bit) floating-point elements in `a` to
/// packed 32-bit integers with truncation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttps_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_epi32)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvttps2dq))]
@@ -2335,7 +2335,7 @@ pub unsafe fn _mm_cvttps_epi32(a: __m128) -> __m128i {
/// Copies double-precision (64-bit) floating-point element `a` to the lower
/// element of the return value, and zeroes the upper element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2346,7 +2346,7 @@ pub unsafe fn _mm_set_sd(a: f64) -> __m128d {
/// Broadcasts double-precision (64-bit) floating-point value `a` to all elements
/// of the return value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2357,7 +2357,7 @@ pub unsafe fn _mm_set1_pd(a: f64) -> __m128d {
/// Broadcasts double-precision (64-bit) floating-point value `a` to all elements
/// of the return value.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd1)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd1)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2368,7 +2368,7 @@ pub unsafe fn _mm_set_pd1(a: f64) -> __m128d {
/// Sets packed double-precision (64-bit) floating-point elements in the return
/// value with the supplied values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2379,7 +2379,7 @@ pub unsafe fn _mm_set_pd(a: f64, b: f64) -> __m128d {
/// Sets packed double-precision (64-bit) floating-point elements in the return
/// value with the supplied values in reverse order.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2390,7 +2390,7 @@ pub unsafe fn _mm_setr_pd(a: f64, b: f64) -> __m128d {
/// Returns packed double-precision (64-bit) floating-point elements with all
/// zeros.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(xorps))] // FIXME xorpd expected
@@ -2404,7 +2404,7 @@ pub unsafe fn _mm_setzero_pd() -> __m128d {
/// The mask is stored in the 2 least significant bits of the return value.
/// All other bits are set to `0`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movmskpd))]
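A sketch of the sign-bit extraction described above (illustrative only):

use std::arch::x86_64::*;

unsafe fn demo_movemask_pd() {
    let a = _mm_setr_pd(-1.0, 1.0);
    // Bit 0 holds the sign of the low lane, bit 1 the sign of the high lane.
    assert_eq!(_mm_movemask_pd(a), 0b01);
}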
@@ -2418,7 +2418,7 @@ pub unsafe fn _mm_movemask_pd(a: __m128d) -> i32 {
/// `mem_addr` must be aligned on a 16-byte boundary or a general-protection
/// exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -2431,7 +2431,7 @@ pub unsafe fn _mm_load_pd(mem_addr: *const f64) -> __m128d {
/// Loads a 64-bit double-precision value to the low element of a
/// 128-bit vector of `[2 x double]` and clears the upper element.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movsd))]
@@ -2444,7 +2444,7 @@ pub unsafe fn _mm_load_sd(mem_addr: *const f64) -> __m128d {
/// vector of `[2 x double]`. The low-order bits are copied from the low-order
/// bits of the first operand.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadh_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movhps))]
@@ -2457,7 +2457,7 @@ pub unsafe fn _mm_loadh_pd(a: __m128d, mem_addr: *const f64) -> __m128d {
/// vector of `[2 x double]`. The high-order bits are copied from the
/// high-order bits of the first operand.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movlps))]
@@ -2471,7 +2471,7 @@ pub unsafe fn _mm_loadl_pd(a: __m128d, mem_addr: *const f64) -> __m128d {
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movntps))] // FIXME movntpd
@@ -2484,7 +2484,7 @@ pub unsafe fn _mm_stream_pd(mem_addr: *mut f64, a: __m128d) {
/// Stores the lower 64 bits of a 128-bit vector of `[2 x double]` to a
/// memory location.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movlps))]
@@ -2497,7 +2497,7 @@ pub unsafe fn _mm_store_sd(mem_addr: *mut f64, a: __m128d) {
/// floating-point elements) from `a` into memory. `mem_addr` must be aligned
/// on a 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movaps))]
@@ -2511,7 +2511,7 @@ pub unsafe fn _mm_store_pd(mem_addr: *mut f64, a: __m128d) {
/// floating-point elements) from `a` into memory.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movups))] // FIXME movupd expected
@@ -2524,13 +2524,13 @@ pub unsafe fn _mm_storeu_pd(mem_addr: *mut f64, a: __m128d) {
/// into 2 contiguous elements in memory. `mem_addr` must be aligned on a
/// 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn _mm_store1_pd(mem_addr: *mut f64, a: __m128d) {
- let b: __m128d = simd_shuffle2!(a, a, [0, 0]);
+ let b: __m128d = simd_shuffle!(a, a, [0, 0]);
*(mem_addr as *mut __m128d) = b;
}
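A hedged usage sketch for this broadcast store, showing the 16-byte alignment the doc warns about (the `Aligned` wrapper is an assumption of the example, not part of the API):

use std::arch::x86_64::*;

#[repr(align(16))]
struct Aligned([f64; 2]);

unsafe fn demo_store1_pd() {
    let mut out = Aligned([0.0; 2]);
    let a = _mm_setr_pd(1.5, 2.5);
    // The low element is broadcast into both slots of the aligned buffer.
    _mm_store1_pd(out.0.as_mut_ptr(), a);
    assert_eq!(out.0, [1.5, 1.5]);
}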
@@ -2538,13 +2538,13 @@ pub unsafe fn _mm_store1_pd(mem_addr: *mut f64, a: __m128d) {
/// into 2 contiguous elements in memory. `mem_addr` must be aligned on a
/// 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd1)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd1)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn _mm_store_pd1(mem_addr: *mut f64, a: __m128d) {
- let b: __m128d = simd_shuffle2!(a, a, [0, 0]);
+ let b: __m128d = simd_shuffle!(a, a, [0, 0]);
*(mem_addr as *mut __m128d) = b;
}
@@ -2553,20 +2553,20 @@ pub unsafe fn _mm_store_pd1(mem_addr: *mut f64, a: __m128d) {
/// `mem_addr` must be aligned on a 16-byte boundary or a general-protection
/// exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn _mm_storer_pd(mem_addr: *mut f64, a: __m128d) {
- let b: __m128d = simd_shuffle2!(a, a, [1, 0]);
+ let b: __m128d = simd_shuffle!(a, a, [1, 0]);
*(mem_addr as *mut __m128d) = b;
}
/// Stores the upper 64 bits of a 128-bit vector of `[2 x double]` to a
/// memory location.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeh_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movhps))]
@@ -2578,7 +2578,7 @@ pub unsafe fn _mm_storeh_pd(mem_addr: *mut f64, a: __m128d) {
/// Stores the lower 64 bits of a 128-bit vector of `[2 x double]` to a
/// memory location.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storel_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movlps))]
@@ -2590,7 +2590,7 @@ pub unsafe fn _mm_storel_pd(mem_addr: *mut f64, a: __m128d) {
/// Loads a double-precision (64-bit) floating-point element from memory
/// into both elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load1_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_pd)
#[inline]
#[target_feature(enable = "sse2")]
// #[cfg_attr(test, assert_instr(movapd))] // FIXME LLVM uses different codegen
@@ -2603,7 +2603,7 @@ pub unsafe fn _mm_load1_pd(mem_addr: *const f64) -> __m128d {
/// Loads a double-precision (64-bit) floating-point element from memory
/// into both elements of the returned vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd1)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd1)
#[inline]
#[target_feature(enable = "sse2")]
// #[cfg_attr(test, assert_instr(movapd))] // FIXME same as _mm_load1_pd
@@ -2616,21 +2616,21 @@ pub unsafe fn _mm_load_pd1(mem_addr: *const f64) -> __m128d {
/// the returned vector in reverse order. `mem_addr` must be aligned on a
/// 16-byte boundary or a general-protection exception may be generated.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movaps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_loadr_pd(mem_addr: *const f64) -> __m128d {
let a = _mm_load_pd(mem_addr);
- simd_shuffle2!(a, a, [1, 0])
+ simd_shuffle!(a, a, [1, 0])
}
/// Loads 128-bits (composed of 2 packed double-precision (64-bit)
/// floating-point elements) from memory into the returned vector.
/// `mem_addr` does not need to be aligned on any particular boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movups))]
@@ -2649,22 +2649,22 @@ pub unsafe fn _mm_loadu_pd(mem_addr: *const f64) -> __m128d {
/// 128-bit vector parameters of `[2 x double]`, using the immediate-value
/// parameter as a specifier.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(shufps, MASK = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_shuffle_pd<const MASK: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm8!(MASK);
- simd_shuffle2!(a, b, <const MASK: i32> [MASK as u32 & 0b1, ((MASK as u32 >> 1) & 0b1) + 2])
+ static_assert_uimm_bits!(MASK, 8);
+ simd_shuffle!(a, b, [MASK as u32 & 0b1, ((MASK as u32 >> 1) & 0b1) + 2])
}
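To make the rewritten index expression concrete: bit 0 of `MASK` selects the low lane from `a`, bit 1 selects the high lane from `b`. A sketch (x86_64 assumed, demo name illustrative):

use std::arch::x86_64::*;

unsafe fn demo_shuffle_pd() {
    let a = _mm_setr_pd(1.0, 2.0);
    let b = _mm_setr_pd(3.0, 4.0);
    // MASK = 0b10: low lane = a[0], high lane = b[1],
    // i.e. shuffle indices [0, 3] in the notation above.
    let mut out = [0.0f64; 2];
    _mm_storeu_pd(out.as_mut_ptr(), _mm_shuffle_pd::<0b10>(a, b));
    assert_eq!(out, [1.0, 4.0]);
}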
/// Constructs a 128-bit floating-point vector of `[2 x double]`. The lower
/// 64 bits are set to the lower 64 bits of the second parameter. The upper
/// 64 bits are set to the upper 64 bits of the first parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movsd))]
@@ -2676,7 +2676,7 @@ pub unsafe fn _mm_move_sd(a: __m128d, b: __m128d) -> __m128d {
/// Casts a 128-bit floating-point vector of `[2 x double]` into a 128-bit
/// floating-point vector of `[4 x float]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_ps)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2687,7 +2687,7 @@ pub unsafe fn _mm_castpd_ps(a: __m128d) -> __m128 {
/// Casts a 128-bit floating-point vector of `[2 x double]` into a 128-bit
/// integer vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2698,7 +2698,7 @@ pub unsafe fn _mm_castpd_si128(a: __m128d) -> __m128i {
/// Casts a 128-bit floating-point vector of `[4 x float]` into a 128-bit
/// floating-point vector of `[2 x double]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2709,7 +2709,7 @@ pub unsafe fn _mm_castps_pd(a: __m128) -> __m128d {
/// Casts a 128-bit floating-point vector of `[4 x float]` into a 128-bit
/// integer vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2720,7 +2720,7 @@ pub unsafe fn _mm_castps_si128(a: __m128) -> __m128i {
/// Casts a 128-bit integer vector into a 128-bit floating-point vector
/// of `[2 x double]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2731,7 +2731,7 @@ pub unsafe fn _mm_castsi128_pd(a: __m128i) -> __m128d {
/// Casts a 128-bit integer vector into a 128-bit floating-point vector
/// of `[4 x float]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_ps)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2741,7 +2741,7 @@ pub unsafe fn _mm_castsi128_ps(a: __m128i) -> __m128 {
/// Returns a vector of type `__m128d` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2751,7 +2751,7 @@ pub unsafe fn _mm_undefined_pd() -> __m128d {
/// Returns a vector of type `__m128i` with undefined elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -2766,13 +2766,13 @@ pub unsafe fn _mm_undefined_si128() -> __m128i {
/// input
/// * The `[63:0]` bits are copied from the `[127:64]` bits of the first input
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(unpckhpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpackhi_pd(a: __m128d, b: __m128d) -> __m128d {
- simd_shuffle2!(a, b, [1, 3])
+ simd_shuffle!(a, b, [1, 3])
}
/// The resulting `__m128d` element is composed of the low-order values of
@@ -2781,13 +2781,13 @@ pub unsafe fn _mm_unpackhi_pd(a: __m128d, b: __m128d) -> __m128d {
/// * The `[127:64]` bits are copied from the `[63:0]` bits of the second input
/// * The `[63:0]` bits are copied from the `[63:0]` bits of the first input
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_pd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(movlhps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_unpacklo_pd(a: __m128d, b: __m128d) -> __m128d {
- simd_shuffle2!(a, b, [0, 2])
+ simd_shuffle!(a, b, [0, 2])
}
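The `[0, 2]` indices correspond to taking the low lane of each input; a minimal sketch:

use std::arch::x86_64::*;

unsafe fn demo_unpacklo_pd() {
    let a = _mm_setr_pd(1.0, 2.0);
    let b = _mm_setr_pd(3.0, 4.0);
    let mut out = [0.0f64; 2];
    _mm_storeu_pd(out.as_mut_ptr(), _mm_unpacklo_pd(a, b));
    // [a0, b0], matching simd_shuffle!(a, b, [0, 2]).
    assert_eq!(out, [1.0, 3.0]);
}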
#[allow(improper_ctypes)]
diff --git a/library/stdarch/crates/core_arch/src/x86/sse3.rs b/library/stdarch/crates/core_arch/src/x86/sse3.rs
index 61f8a4e78..092a8d9cd 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse3.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse3.rs
@@ -11,7 +11,7 @@ use stdarch_test::assert_instr;
/// Alternatively adds and subtracts packed single-precision (32-bit)
/// floating-point elements in `a` to/from packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_addsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_addsub_ps)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(addsubps))]
@@ -23,7 +23,7 @@ pub unsafe fn _mm_addsub_ps(a: __m128, b: __m128) -> __m128 {
/// Alternatively adds and subtracts packed double-precision (64-bit)
/// floating-point elements in `a` to/from packed elements in `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_addsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_addsub_pd)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(addsubpd))]
@@ -35,7 +35,7 @@ pub unsafe fn _mm_addsub_pd(a: __m128d, b: __m128d) -> __m128d {
/// Horizontally adds adjacent pairs of double-precision (64-bit)
/// floating-point elements in `a` and `b`, and packs the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pd)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(haddpd))]
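"Horizontal" here means pairs within each operand are reduced, with `a` feeding the low lane and `b` the high lane. A sketch; SSE3 is not baseline on x86_64, so real code should gate on is_x86_feature_detected!("sse3") before calling:

use std::arch::x86_64::*;

#[target_feature(enable = "sse3")]
unsafe fn demo_hadd_pd() {
    let a = _mm_setr_pd(1.0, 2.0);
    let b = _mm_setr_pd(10.0, 20.0);
    let mut out = [0.0f64; 2];
    _mm_storeu_pd(out.as_mut_ptr(), _mm_hadd_pd(a, b));
    // Low lane: a0 + a1; high lane: b0 + b1.
    assert_eq!(out, [3.0, 30.0]);
}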
@@ -47,7 +47,7 @@ pub unsafe fn _mm_hadd_pd(a: __m128d, b: __m128d) -> __m128d {
/// Horizontally adds adjacent pairs of single-precision (32-bit)
/// floating-point elements in `a` and `b`, and packs the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_ps)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(haddps))]
@@ -59,7 +59,7 @@ pub unsafe fn _mm_hadd_ps(a: __m128, b: __m128) -> __m128 {
/// Horizontally subtracts adjacent pairs of double-precision (64-bit)
/// floating-point elements in `a` and `b`, and packs the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pd)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(hsubpd))]
@@ -71,7 +71,7 @@ pub unsafe fn _mm_hsub_pd(a: __m128d, b: __m128d) -> __m128d {
/// Horizontally subtracts adjacent pairs of single-precision (32-bit)
/// floating-point elements in `a` and `b`, and packs the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_ps)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(hsubps))]
@@ -84,7 +84,7 @@ pub unsafe fn _mm_hsub_ps(a: __m128, b: __m128) -> __m128 {
/// This intrinsic may perform better than `_mm_loadu_si128`
/// when the data crosses a cache line boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lddqu_si128)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(lddqu))]
@@ -96,19 +96,19 @@ pub unsafe fn _mm_lddqu_si128(mem_addr: *const __m128i) -> __m128i {
/// Duplicates the low double-precision (64-bit) floating-point element
/// from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movedup_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movedup_pd)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(movddup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movedup_pd(a: __m128d) -> __m128d {
- simd_shuffle2!(a, a, [0, 0])
+ simd_shuffle!(a, a, [0, 0])
}
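The `[0, 0]` shuffle is exactly the `movddup` broadcast; a sketch (SSE3-gated as above):

use std::arch::x86_64::*;

#[target_feature(enable = "sse3")]
unsafe fn demo_movedup_pd() {
    let a = _mm_setr_pd(3.0, 9.0);
    let mut out = [0.0f64; 2];
    _mm_storeu_pd(out.as_mut_ptr(), _mm_movedup_pd(a));
    // Both lanes now hold the original low element.
    assert_eq!(out, [3.0, 3.0]);
}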
/// Loads a double-precision (64-bit) floating-point element from memory
/// into both elements of the return vector.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loaddup_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loaddup_pd)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(movddup))]
@@ -120,25 +120,25 @@ pub unsafe fn _mm_loaddup_pd(mem_addr: *const f64) -> __m128d {
/// Duplicates odd-indexed single-precision (32-bit) floating-point elements
/// from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movehdup_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehdup_ps)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(movshdup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_movehdup_ps(a: __m128) -> __m128 {
- simd_shuffle4!(a, a, [1, 1, 3, 3])
+ simd_shuffle!(a, a, [1, 1, 3, 3])
}
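And the single-precision analogue of the duplication pattern, matching the `[1, 1, 3, 3]` indices (sketch, SSE3-gated):

use std::arch::x86_64::*;

#[target_feature(enable = "sse3")]
unsafe fn demo_movehdup_ps() {
    let a = _mm_setr_ps(0.0, 1.0, 2.0, 3.0);
    let mut out = [0.0f32; 4];
    _mm_storeu_ps(out.as_mut_ptr(), _mm_movehdup_ps(a));
    // Odd-indexed lanes are copied downward: [1, 1, 3, 3].
    assert_eq!(out, [1.0, 1.0, 3.0, 3.0]);
}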
/// Duplicates even-indexed single-precision (32-bit) floating-point elements
/// from `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_moveldup_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_moveldup_ps)
#[inline]
#[target_feature(enable = "sse3")]
#[cfg_attr(test, assert_instr(movsldup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_moveldup_ps(a: __m128) -> __m128 {
- simd_shuffle4!(a, a, [0, 0, 2, 2])
+ simd_shuffle!(a, a, [0, 0, 2, 2])
}
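The width-suffixed shuffle macros (`simd_shuffle2!`, `simd_shuffle4!`, ...) are replaced throughout by a single `simd_shuffle!` that infers the lane count from the length of the index array. As a scalar sketch of what the `[0, 0, 2, 2]` mask above computes (`moveldup_model` is a hypothetical helper, not part of the patch):

```
fn moveldup_model(a: [f32; 4]) -> [f32; 4] {
    // Index i in the mask selects lane i from the concatenated sources;
    // both sources are `a` here, so [0, 0, 2, 2] duplicates the even lanes.
    [a[0], a[0], a[2], a[2]]
}

fn main() {
    assert_eq!(moveldup_model([1.0, 2.0, 3.0, 4.0]), [1.0, 1.0, 3.0, 3.0]);
}
```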
#[allow(improper_ctypes)]
diff --git a/library/stdarch/crates/core_arch/src/x86/sse41.rs b/library/stdarch/crates/core_arch/src/x86/sse41.rs
index 3162ad7d9..7ba86e5f7 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse41.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse41.rs
@@ -56,7 +56,7 @@ pub const _MM_FROUND_NEARBYINT: i32 = _MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTI
/// If the high bit is set, the element of `b` is selected. The element
/// of `a` is selected otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_epi8)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pblendvb))]
@@ -71,7 +71,7 @@ pub unsafe fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i
/// corresponding element of `a`, and a set bit the corresponding
/// element of `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi16)
#[inline]
#[target_feature(enable = "sse4.1")]
// Note: LLVM7 prefers the single-precision floating-point domain when possible
@@ -81,14 +81,14 @@ pub unsafe fn _mm_blendv_epi8(a: __m128i, b: __m128i, mask: __m128i) -> __m128i
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_epi16<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pblendw(a.as_i16x8(), b.as_i16x8(), IMM8 as u8))
}
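The renamed assertion also changes shape: instead of one macro per width (`static_assert_imm8!` and friends), `static_assert_uimm_bits!(IMM, N)` takes the bit width as an argument and rejects any constant that does not fit in `N` unsigned bits. A rough const-eval model of the check (`fits_in_uimm_bits` is a hypothetical stand-in for the internal macro):

```
const fn fits_in_uimm_bits(value: i32, bits: u32) -> bool {
    // Accept exactly the values representable as an unsigned `bits`-bit immediate.
    value >= 0 && (value as u64) < (1u64 << bits)
}

const _: () = assert!(fits_in_uimm_bits(0b1010_0101, 8)); // ok for an IMM8
const _: () = assert!(!fits_in_uimm_bits(4, 2)); // 4 needs 3 bits, rejected for an IMM2
```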
/// Blend packed double-precision (64-bit) floating-point elements from `a`
/// and `b` using `mask`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_pd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(blendvpd))]
@@ -100,7 +100,7 @@ pub unsafe fn _mm_blendv_pd(a: __m128d, b: __m128d, mask: __m128d) -> __m128d {
/// Blend packed single-precision (32-bit) floating-point elements from `a`
/// and `b` using `mask`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(blendvps))]
@@ -112,7 +112,7 @@ pub unsafe fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 {
/// Blend packed double-precision (64-bit) floating-point elements from `a`
/// and `b` using control mask `IMM2`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_pd)
#[inline]
#[target_feature(enable = "sse4.1")]
// Note: LLVM7 prefers the single-precision floating-point domain when possible
@@ -122,21 +122,21 @@ pub unsafe fn _mm_blendv_ps(a: __m128, b: __m128, mask: __m128) -> __m128 {
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_pd<const IMM2: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm2!(IMM2);
+ static_assert_uimm_bits!(IMM2, 2);
blendpd(a, b, IMM2 as u8)
}
/// Blend packed single-precision (32-bit) floating-point elements from `a`
/// and `b` using mask `IMM4`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(blendps, IMM4 = 0b0101))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm4!(IMM4);
+ static_assert_uimm_bits!(IMM4, 4);
blendps(a, b, IMM4 as u8)
}
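A usage sketch for the immediate-mask blends under the new assertion, using the stable `std::arch` API with runtime feature detection (not part of the patch):

```
#[cfg(target_arch = "x86_64")]
fn blend_demo() {
    if is_x86_feature_detected!("sse4.1") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
            let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0);
            // IMM4 = 0b0101: set bits take lanes 0 and 2 from `b`, clear bits keep `a`.
            let r = _mm_blend_ps::<0b0101>(a, b);
            let mut out = [0.0f32; 4];
            _mm_storeu_ps(out.as_mut_ptr(), r);
            assert_eq!(out, [5.0, 2.0, 7.0, 4.0]);
        }
    }
}
```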
@@ -164,7 +164,7 @@ pub unsafe fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 {
/// # }
/// # }
/// ```
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(
@@ -174,7 +174,7 @@ pub unsafe fn _mm_blend_ps<const IMM4: i32>(a: __m128, b: __m128) -> __m128 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_extract_ps<const IMM8: i32>(a: __m128) -> i32 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
transmute(simd_extract::<_, f32>(a, IMM8 as u32))
}
@@ -183,20 +183,20 @@ pub unsafe fn _mm_extract_ps<const IMM8: i32>(a: __m128) -> i32 {
///
/// See [LLVM commit D20468](https://reviews.llvm.org/D20468).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi8)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pextrb, IMM8 = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_extract_epi8<const IMM8: i32>(a: __m128i) -> i32 {
- static_assert_imm4!(IMM8);
+ static_assert_uimm_bits!(IMM8, 4);
simd_extract::<_, u8>(a.as_u8x16(), IMM8 as u32) as i32
}
/// Extracts a 32-bit integer from `a` selected with `IMM8`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(
@@ -206,7 +206,7 @@ pub unsafe fn _mm_extract_epi8<const IMM8: i32>(a: __m128i) -> i32 {
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_extract_epi32<const IMM8: i32>(a: __m128i) -> i32 {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
simd_extract::<_, i32>(a.as_i32x4(), IMM8 as u32)
}
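The 2-bit assertion above reflects that `_mm_extract_epi32` addresses one of four lanes. A minimal usage sketch (not part of the patch):

```
#[cfg(target_arch = "x86_64")]
fn extract_demo() {
    if is_x86_feature_detected!("sse4.1") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_setr_epi32(10, 20, 30, 40);
            // IMM8 must fit in 2 bits: lane indices 0..=3.
            assert_eq!(_mm_extract_epi32::<2>(a), 30);
        }
    }
}
```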
@@ -233,49 +233,49 @@ pub unsafe fn _mm_extract_epi32<const IMM8: i32>(a: __m128i) -> i32 {
/// * Bits `[3:0]`: If any of these bits are set, the corresponding result
/// element is cleared.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(insertps, IMM8 = 0b1010))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_insert_ps<const IMM8: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
insertps(a, b, IMM8 as u8)
}
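A scalar model of the `insertps` control byte per Intel's encoding: bits `[7:6]` select the source lane of `b`, bits `[5:4]` the destination lane in `a`, and bits `[3:0]` zero result lanes as the list above describes (`insert_ps_model` is a hypothetical helper):

```
fn insert_ps_model(a: [f32; 4], b: [f32; 4], imm8: u8) -> [f32; 4] {
    let mut r = a;
    // Copy the selected lane of `b` into the selected lane of `a`...
    r[((imm8 >> 4) & 0b11) as usize] = b[((imm8 >> 6) & 0b11) as usize];
    // ...then clear any result lane whose zero-mask bit is set.
    for i in 0..4 {
        if imm8 & (1 << i) != 0 {
            r[i] = 0.0;
        }
    }
    r
}
```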
/// Returns a copy of `a` with the 8-bit integer from `i` inserted at a
/// location specified by `IMM8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi8)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pinsrb, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_insert_epi8<const IMM8: i32>(a: __m128i, i: i32) -> __m128i {
- static_assert_imm4!(IMM8);
+ static_assert_uimm_bits!(IMM8, 4);
transmute(simd_insert(a.as_i8x16(), IMM8 as u32, i as i8))
}
/// Returns a copy of `a` with the 32-bit integer from `i` inserted at a
/// location specified by `IMM8`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pinsrd, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_insert_epi32<const IMM8: i32>(a: __m128i, i: i32) -> __m128i {
- static_assert_imm2!(IMM8);
+ static_assert_uimm_bits!(IMM8, 2);
transmute(simd_insert(a.as_i32x4(), IMM8 as u32, i))
}
/// Compares packed 8-bit integers in `a` and `b` and returns packed maximum
/// values in `dst`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi8)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmaxsb))]
@@ -289,7 +289,7 @@ pub unsafe fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed unsigned 16-bit integers in `a` and `b`, and returns packed
/// maximum.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu16)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmaxuw))]
@@ -303,7 +303,7 @@ pub unsafe fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 32-bit integers in `a` and `b`, and returns packed maximum
/// values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmaxsd))]
@@ -317,7 +317,7 @@ pub unsafe fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed unsigned 32-bit integers in `a` and `b`, and returns packed
/// maximum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmaxud))]
@@ -331,7 +331,7 @@ pub unsafe fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 8-bit integers in `a` and `b` and returns packed minimum
/// values in `dst`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi8)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pminsb))]
@@ -345,7 +345,7 @@ pub unsafe fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed unsigned 16-bit integers in `a` and `b`, and returns packed
/// minimum.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu16)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pminuw))]
@@ -359,7 +359,7 @@ pub unsafe fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 32-bit integers in `a` and `b`, and returns packed minimum
/// values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pminsd))]
@@ -373,7 +373,7 @@ pub unsafe fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed unsigned 32-bit integers in `a` and `b`, and returns packed
/// minimum values.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pminud))]
@@ -387,7 +387,7 @@ pub unsafe fn _mm_min_epu32(a: __m128i, b: __m128i) -> __m128i {
/// Converts packed 32-bit integers from `a` and `b` to packed 16-bit integers
/// using unsigned saturation
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_packus_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(packusdw))]
@@ -398,7 +398,7 @@ pub unsafe fn _mm_packus_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Compares packed 64-bit integers in `a` and `b` for equality
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pcmpeqq))]
@@ -409,161 +409,161 @@ pub unsafe fn _mm_cmpeq_epi64(a: __m128i, b: __m128i) -> __m128i {
/// Sign extend packed 8-bit integers in `a` to packed 16-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi8_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi16)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovsxbw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi8_epi16(a: __m128i) -> __m128i {
let a = a.as_i8x16();
- let a: i8x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let a: i8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute(simd_cast::<_, i16x8>(a))
}
/// Sign extend packed 8-bit integers in `a` to packed 32-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi8_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovsxbd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi8_epi32(a: __m128i) -> __m128i {
let a = a.as_i8x16();
- let a: i8x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let a: i8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute(simd_cast::<_, i32x4>(a))
}
/// Sign extend packed 8-bit integers in the low 8 bytes of `a` to packed
/// 64-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi8_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovsxbq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi8_epi64(a: __m128i) -> __m128i {
let a = a.as_i8x16();
- let a: i8x2 = simd_shuffle2!(a, a, [0, 1]);
+ let a: i8x2 = simd_shuffle!(a, a, [0, 1]);
transmute(simd_cast::<_, i64x2>(a))
}
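The shuffle-then-cast pattern in these conversions first narrows to the lanes that participate, then widens with sign extension, which on scalars is just an `as` cast. A sketch of the epi8 to epi64 case (hypothetical helper, not part of the patch):

```
fn cvtepi8_epi64_model(a: [i8; 16]) -> [i64; 2] {
    // Only the two lowest bytes survive; each is sign-extended to 64 bits.
    [a[0] as i64, a[1] as i64]
}

fn main() {
    let mut a = [0i8; 16];
    a[0] = -1;
    a[1] = 7;
    assert_eq!(cvtepi8_epi64_model(a), [-1, 7]);
}
```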
/// Sign extend packed 16-bit integers in `a` to packed 32-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovsxwd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi16_epi32(a: __m128i) -> __m128i {
let a = a.as_i16x8();
- let a: i16x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let a: i16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute(simd_cast::<_, i32x4>(a))
}
/// Sign extend packed 16-bit integers in `a` to packed 64-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovsxwq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi16_epi64(a: __m128i) -> __m128i {
let a = a.as_i16x8();
- let a: i16x2 = simd_shuffle2!(a, a, [0, 1]);
+ let a: i16x2 = simd_shuffle!(a, a, [0, 1]);
transmute(simd_cast::<_, i64x2>(a))
}
/// Sign extend packed 32-bit integers in `a` to packed 64-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovsxdq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepi32_epi64(a: __m128i) -> __m128i {
let a = a.as_i32x4();
- let a: i32x2 = simd_shuffle2!(a, a, [0, 1]);
+ let a: i32x2 = simd_shuffle!(a, a, [0, 1]);
transmute(simd_cast::<_, i64x2>(a))
}
/// Zero extend packed unsigned 8-bit integers in `a` to packed 16-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu8_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi16)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovzxbw))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepu8_epi16(a: __m128i) -> __m128i {
let a = a.as_u8x16();
- let a: u8x8 = simd_shuffle8!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
+ let a: u8x8 = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);
transmute(simd_cast::<_, i16x8>(a))
}
/// Zero extend packed unsigned 8-bit integers in `a` to packed 32-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu8_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovzxbd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepu8_epi32(a: __m128i) -> __m128i {
let a = a.as_u8x16();
- let a: u8x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let a: u8x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute(simd_cast::<_, i32x4>(a))
}
/// Zero extend packed unsigned 8-bit integers in `a` to packed 64-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu8_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovzxbq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepu8_epi64(a: __m128i) -> __m128i {
let a = a.as_u8x16();
- let a: u8x2 = simd_shuffle2!(a, a, [0, 1]);
+ let a: u8x2 = simd_shuffle!(a, a, [0, 1]);
transmute(simd_cast::<_, i64x2>(a))
}
/// Zero extend packed unsigned 16-bit integers in `a`
/// to packed 32-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu16_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovzxwd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepu16_epi32(a: __m128i) -> __m128i {
let a = a.as_u16x8();
- let a: u16x4 = simd_shuffle4!(a, a, [0, 1, 2, 3]);
+ let a: u16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
transmute(simd_cast::<_, i32x4>(a))
}
/// Zero extend packed unsigned 16-bit integers in `a`
/// to packed 64-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu16_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovzxwq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepu16_epi64(a: __m128i) -> __m128i {
let a = a.as_u16x8();
- let a: u16x2 = simd_shuffle2!(a, a, [0, 1]);
+ let a: u16x2 = simd_shuffle!(a, a, [0, 1]);
transmute(simd_cast::<_, i64x2>(a))
}
/// Zero extend packed unsigned 32-bit integers in `a`
/// to packed 64-bit integers
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu32_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu32_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmovzxdq))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cvtepu32_epi64(a: __m128i) -> __m128i {
let a = a.as_u32x4();
- let a: u32x2 = simd_shuffle2!(a, a, [0, 1]);
+ let a: u32x2 = simd_shuffle!(a, a, [0, 1]);
transmute(simd_cast::<_, i64x2>(a))
}
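The pmovzx family follows the same narrow-then-widen pattern but widens with zero extension, so a `0xFF` byte becomes `255` rather than `-1`. A scalar sketch of the epu8 to epi16 case (hypothetical helper):

```
fn cvtepu8_epi16_model(a: [u8; 16]) -> [i16; 8] {
    // u8 -> i16 is a zero extension on each of the eight low bytes.
    core::array::from_fn(|i| a[i] as i16)
}

fn main() {
    let mut a = [0u8; 16];
    a[0] = 0xFF;
    assert_eq!(cvtepu8_epi16_model(a)[0], 255);
}
```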
@@ -575,14 +575,14 @@ pub unsafe fn _mm_cvtepu32_epi64(a: __m128i) -> __m128i {
/// the dot product will be stored in the return value component. Otherwise,
/// if the broadcast mask bit is zero, the return component will be zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_pd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(dppd, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_dp_pd<const IMM8: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
dppd(a, b, IMM8 as u8)
}
@@ -594,14 +594,14 @@ pub unsafe fn _mm_dp_pd<const IMM8: i32>(a: __m128d, b: __m128d) -> __m128d {
/// the dot product will be stored in the return value component. Otherwise,
/// if the broadcast mask bit is zero, the return component will be zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(dpps, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_dp_ps<const IMM8: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
dpps(a, b, IMM8 as u8)
}
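A scalar model of the `dpps` control byte described above: the high nibble of `IMM8` selects which lanes enter the product, the low nibble broadcasts the sum (`dp_ps_model` is a hypothetical helper):

```
fn dp_ps_model(a: [f32; 4], b: [f32; 4], imm8: u8) -> [f32; 4] {
    let mut sum = 0.0;
    for i in 0..4 {
        if imm8 & (0x10 << i) != 0 {
            sum += a[i] * b[i]; // lane i participates in the dot product
        }
    }
    // Broadcast the sum to the lanes whose low-nibble bit is set; zero the rest.
    core::array::from_fn(|i| if imm8 & (1 << i) != 0 { sum } else { 0.0 })
}

fn main() {
    let r = dp_ps_model([1.0, 2.0, 3.0, 4.0], [1.0; 4], 0b0111_0001);
    assert_eq!(r, [6.0, 0.0, 0.0, 0.0]); // 1 + 2 + 3, broadcast to lane 0 only
}
```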
@@ -609,7 +609,7 @@ pub unsafe fn _mm_dp_ps<const IMM8: i32>(a: __m128, b: __m128) -> __m128 {
/// down to an integer value, and stores the results as packed double-precision
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_pd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundpd))]
@@ -622,7 +622,7 @@ pub unsafe fn _mm_floor_pd(a: __m128d) -> __m128d {
/// down to an integer value, and stores the results as packed single-precision
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundps))]
@@ -637,7 +637,7 @@ pub unsafe fn _mm_floor_ps(a: __m128) -> __m128 {
/// and copies the upper element from `a` to the upper element of the intrinsic
/// result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_sd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundsd))]
@@ -652,7 +652,7 @@ pub unsafe fn _mm_floor_sd(a: __m128d, b: __m128d) -> __m128d {
/// and copies the upper 3 packed elements from `a` to the upper elements
/// of the intrinsic result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ss)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundss))]
@@ -665,7 +665,7 @@ pub unsafe fn _mm_floor_ss(a: __m128, b: __m128) -> __m128 {
/// up to an integer value, and stores the results as packed double-precision
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_pd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundpd))]
@@ -678,7 +678,7 @@ pub unsafe fn _mm_ceil_pd(a: __m128d) -> __m128d {
/// up to an integer value, and stores the results as packed single-precision
/// floating-point elements.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundps))]
@@ -689,11 +689,11 @@ pub unsafe fn _mm_ceil_ps(a: __m128) -> __m128 {
/// Round the lower double-precision (64-bit) floating-point element in `b`
/// up to an integer value, store the result as a double-precision
-/// floating-point element in the lower element of the intrisic result,
+/// floating-point element in the lower element of the intrinsic result,
/// and copies the upper element from `a` to the upper element
/// of the intrinsic result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_sd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundsd))]
@@ -708,7 +708,7 @@ pub unsafe fn _mm_ceil_sd(a: __m128d, b: __m128d) -> __m128d {
/// and copies the upper 3 packed elements from `a` to the upper elements
/// of the intrinsic result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ss)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundss))]
@@ -747,14 +747,14 @@ pub unsafe fn _mm_ceil_ss(a: __m128, b: __m128) -> __m128 {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_pd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_pd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundpd, ROUNDING = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_round_pd<const ROUNDING: i32>(a: __m128d) -> __m128d {
- static_assert_imm4!(ROUNDING);
+ static_assert_uimm_bits!(ROUNDING, 4);
roundpd(a, ROUNDING)
}
@@ -788,14 +788,14 @@ pub unsafe fn _mm_round_pd<const ROUNDING: i32>(a: __m128d) -> __m128d {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ps)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundps, ROUNDING = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_round_ps<const ROUNDING: i32>(a: __m128) -> __m128 {
- static_assert_imm4!(ROUNDING);
+ static_assert_uimm_bits!(ROUNDING, 4);
roundps(a, ROUNDING)
}
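The 4-bit `ROUNDING` immediate combines a rounding mode with the exception-suppression flag. A usage sketch with the standard `_MM_FROUND_*` constants (not part of the patch):

```
#[cfg(target_arch = "x86_64")]
fn round_demo() {
    if is_x86_feature_detected!("sse4.1") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm_setr_ps(1.5, 2.5, -1.5, -2.5);
            // Round to nearest even, suppressing precision exceptions.
            let r = _mm_round_ps::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a);
            let mut out = [0.0f32; 4];
            _mm_storeu_ps(out.as_mut_ptr(), r);
            assert_eq!(out, [2.0, 2.0, -2.0, -2.0]);
        }
    }
}
```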
@@ -831,14 +831,14 @@ pub unsafe fn _mm_round_ps<const ROUNDING: i32>(a: __m128) -> __m128 {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_sd)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundsd, ROUNDING = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m128d {
- static_assert_imm4!(ROUNDING);
+ static_assert_uimm_bits!(ROUNDING, 4);
roundsd(a, b, ROUNDING)
}
@@ -874,14 +874,14 @@ pub unsafe fn _mm_round_sd<const ROUNDING: i32>(a: __m128d, b: __m128d) -> __m12
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ss)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(roundss, ROUNDING = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128 {
- static_assert_imm4!(ROUNDING);
+ static_assert_uimm_bits!(ROUNDING, 4);
roundss(a, b, ROUNDING)
}
@@ -905,7 +905,7 @@ pub unsafe fn _mm_round_ss<const ROUNDING: i32>(a: __m128, b: __m128) -> __m128
/// * bits `[18:16]` - contain the index of the minimum value
/// * remaining bits are set to `0`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_minpos_epu16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_minpos_epu16)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(phminposuw))]
@@ -917,7 +917,7 @@ pub unsafe fn _mm_minpos_epu16(a: __m128i) -> __m128i {
/// Multiplies the low 32-bit integers from each packed 64-bit
/// element in `a` and `b`, and returns the signed 64-bit result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmuldq))]
@@ -933,7 +933,7 @@ pub unsafe fn _mm_mul_epi32(a: __m128i, b: __m128i) -> __m128i {
/// arithmetic `pmulld __m128i::splat(i32::MAX), __m128i::splat(2)` would
/// return a negative number.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mullo_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi32)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pmulld))]
@@ -974,14 +974,14 @@ pub unsafe fn _mm_mullo_epi32(a: __m128i, b: __m128i) -> __m128i {
/// * A `__m128i` vector containing the sums of the sets of absolute
/// differences between both operands.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mpsadbw_epu8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mpsadbw_epu8)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(mpsadbw, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_mpsadbw_epu8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm3!(IMM8);
+ static_assert_uimm_bits!(IMM8, 3);
transmute(mpsadbw(a.as_u8x16(), b.as_u8x16(), IMM8 as u8))
}
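The 3-bit immediate selects which 4-byte windows of the operands are compared; each output word is a sum of absolute byte differences over one window. A scalar sketch of one such sum (`sad4` is a hypothetical helper):

```
fn sad4(a: &[u8], b: &[u8]) -> u16 {
    // Sum of absolute differences over one 4-byte window.
    (0..4).map(|i| (i16::from(a[i]) - i16::from(b[i])).unsigned_abs()).sum()
}

fn main() {
    assert_eq!(sad4(&[1, 2, 3, 4], &[4, 3, 2, 1]), 8); // 3 + 1 + 1 + 3
}
```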
@@ -999,7 +999,7 @@ pub unsafe fn _mm_mpsadbw_epu8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m12
/// * `1` - if the specified bits are all zeros,
/// * `0` - otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_si128)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(ptest))]
@@ -1022,7 +1022,7 @@ pub unsafe fn _mm_testz_si128(a: __m128i, mask: __m128i) -> i32 {
/// * `1` - if the specified bits are all ones,
/// * `0` - otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_si128)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(ptest))]
@@ -1045,7 +1045,7 @@ pub unsafe fn _mm_testc_si128(a: __m128i, mask: __m128i) -> i32 {
/// * `1` - if the specified bits are neither all zeros nor all ones,
/// * `0` - otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testnzc_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_si128)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(ptest))]
@@ -1068,7 +1068,7 @@ pub unsafe fn _mm_testnzc_si128(a: __m128i, mask: __m128i) -> i32 {
/// * `1` - if the specified bits are all zeros,
/// * `0` - otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_zeros)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_zeros)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(ptest))]
@@ -1089,7 +1089,7 @@ pub unsafe fn _mm_test_all_zeros(a: __m128i, mask: __m128i) -> i32 {
/// * `1` - if the bits specified in the operand are all set to 1,
/// * `0` - otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_ones)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_ones)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pcmpeqd))]
@@ -1113,7 +1113,7 @@ pub unsafe fn _mm_test_all_ones(a: __m128i) -> i32 {
/// * `1` - if the specified bits are neither all zeros nor all ones,
/// * `0` - otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_mix_ones_zeros)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_mix_ones_zeros)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(ptest))]
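A usage sketch for the `ptest`-based predicates above, which reduce a masked vector to a scalar flag (not part of the patch):

```
#[cfg(target_arch = "x86_64")]
fn ptest_demo() {
    if is_x86_feature_detected!("sse4.1") {
        use std::arch::x86_64::*;
        unsafe {
            let zero = _mm_setzero_si128();
            let mask = _mm_set1_epi8(-1); // test all 128 bits
            assert_eq!(_mm_testz_si128(zero, mask), 1);
            assert_eq!(_mm_test_all_zeros(zero, mask), 1);
        }
    }
}
```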
diff --git a/library/stdarch/crates/core_arch/src/x86/sse42.rs b/library/stdarch/crates/core_arch/src/x86/sse42.rs
index 4eb12480b..76a6a4075 100644
--- a/library/stdarch/crates/core_arch/src/x86/sse42.rs
+++ b/library/stdarch/crates/core_arch/src/x86/sse42.rs
@@ -67,14 +67,14 @@ pub const _SIDD_UNIT_MASK: i32 = 0b0100_0000;
/// Compares packed strings with implicit lengths in `a` and `b` using the
/// control in `IMM8`, and return the generated mask.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistrm)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrm)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistrm, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistrm<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pcmpistrm128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8))
}
@@ -255,14 +255,14 @@ pub unsafe fn _mm_cmpistrm<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
/// # }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistri)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistri)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistri, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistri<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpistri128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)
}
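A usage sketch for `_mm_cmpistri` in its default equal-any mode, searching a 16-byte block for any byte from a set; the buffers are NUL-padded so the implicit lengths terminate (not part of the patch):

```
#[cfg(target_arch = "x86_64")]
fn cmpistri_demo() {
    if is_x86_feature_detected!("sse4.2") {
        use std::arch::x86_64::*;
        unsafe {
            let set = _mm_loadu_si128(b"o\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0".as_ptr().cast());
            let hay = _mm_loadu_si128(b"hello world\0\0\0\0\0".as_ptr().cast());
            // Index of the first byte of `hay` matching any byte of `set`.
            let idx = _mm_cmpistri::<{ _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY }>(set, hay);
            assert_eq!(idx, 4); // the 'o' in "hello"
        }
    }
}
```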
@@ -270,14 +270,14 @@ pub unsafe fn _mm_cmpistri<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
/// control in `IMM8`, and return `1` if any character in `b` was null,
/// and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistrz)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrz)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistri, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistrz<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpistriz128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)
}
@@ -285,14 +285,14 @@ pub unsafe fn _mm_cmpistrz<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
/// control in `IMM8`, and return `1` if the resulting mask was non-zero,
/// and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistrc)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrc)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistri, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistrc<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpistric128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)
}
@@ -300,28 +300,28 @@ pub unsafe fn _mm_cmpistrc<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
/// control in `IMM8`, and returns `1` if any character in `a` was null,
/// and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistrs)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrs)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistri, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistrs<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpistris128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)
}
/// Compares packed strings with implicit lengths in `a` and `b` using the
/// control in `IMM8`, and return bit `0` of the resulting bit mask.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistro)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistro)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistri, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistro<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpistrio128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)
}
@@ -329,28 +329,28 @@ pub unsafe fn _mm_cmpistro<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
/// control in `IMM8`, and return `1` if `b` did not contain a null
/// character and the resulting mask was zero, and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpistra)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistra)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpistri, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpistra<const IMM8: i32>(a: __m128i, b: __m128i) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpistria128(a.as_i8x16(), b.as_i8x16(), IMM8 as i8)
}
/// Compares packed strings in `a` and `b` with lengths `la` and `lb`
/// using the control in `IMM8`, and return the generated mask.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestrm)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrm)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestrm, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestrm<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
transmute(pcmpestrm128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8))
}
@@ -432,14 +432,14 @@ pub unsafe fn _mm_cmpestrm<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb:
/// [`_SIDD_MOST_SIGNIFICANT`]: constant._SIDD_MOST_SIGNIFICANT.html
/// [`_mm_cmpistri`]: fn._mm_cmpistri.html
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestri)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestri)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestri, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestri<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpestri128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)
}
@@ -447,14 +447,14 @@ pub unsafe fn _mm_cmpestri<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb:
/// using the control in `IMM8`, and return `1` if any character in
/// `b` was null, and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestrz)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrz)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestri, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestrz<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpestriz128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)
}
@@ -462,14 +462,14 @@ pub unsafe fn _mm_cmpestrz<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb:
/// using the control in `IMM8`, and return `1` if the resulting mask
/// was non-zero, and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestrc)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrc)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestri, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestrc<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpestric128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)
}
@@ -477,14 +477,14 @@ pub unsafe fn _mm_cmpestrc<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb:
/// using the control in `IMM8`, and return `1` if any character in
/// `a` was null, and `0` otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestrs)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrs)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestri, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestrs<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpestris128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)
}
@@ -492,14 +492,14 @@ pub unsafe fn _mm_cmpestrs<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb:
/// using the control in `IMM8`, and return bit `0` of the resulting
/// bit mask.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestro)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestro)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestri, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestro<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpestrio128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)
}
@@ -508,21 +508,21 @@ pub unsafe fn _mm_cmpestro<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb:
/// contain a null character and the resulting mask was zero, and `0`
/// otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpestra)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestra)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpestri, IMM8 = 0))]
#[rustc_legacy_const_generics(4)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmpestra<const IMM8: i32>(a: __m128i, la: i32, b: __m128i, lb: i32) -> i32 {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pcmpestria128(a.as_i8x16(), la, b.as_i8x16(), lb, IMM8 as i8)
}
/// Starting with the initial value in `crc`, return the accumulated
/// CRC32-C value for unsigned 8-bit integer `v`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_crc32_u8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u8)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(crc32))]
@@ -534,7 +534,7 @@ pub unsafe fn _mm_crc32_u8(crc: u32, v: u8) -> u32 {
/// Starting with the initial value in `crc`, return the accumulated
/// CRC32-C value for unsigned 16-bit integer `v`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_crc32_u16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u16)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(crc32))]
@@ -546,7 +546,7 @@ pub unsafe fn _mm_crc32_u16(crc: u32, v: u16) -> u32 {
/// Starting with the initial value in `crc`, return the accumulated
/// CRC32-C value for unsigned 32-bit integer `v`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_crc32_u32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u32)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(crc32))]
@@ -558,7 +558,7 @@ pub unsafe fn _mm_crc32_u32(crc: u32, v: u32) -> u32 {
/// Compares packed 64-bit integers in `a` and `b` for greater-than,
/// returning the results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi64)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(pcmpgtq))]
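A usage sketch for the CRC32-C accumulators in this file; a throughput-oriented version would prefer the wider `_mm_crc32_u32`/`_mm_crc32_u64` forms, but byte-at-a-time shows the chaining (assumes the caller has verified SSE4.2 support; not part of the patch):

```
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse4.2")]
unsafe fn crc32c(mut crc: u32, bytes: &[u8]) -> u32 {
    use std::arch::x86_64::*;
    for &b in bytes {
        crc = _mm_crc32_u8(crc, b); // fold each byte into the running CRC
    }
    crc
}
```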
diff --git a/library/stdarch/crates/core_arch/src/x86/ssse3.rs b/library/stdarch/crates/core_arch/src/x86/ssse3.rs
index 4beb496b6..bdc6836ac 100644
--- a/library/stdarch/crates/core_arch/src/x86/ssse3.rs
+++ b/library/stdarch/crates/core_arch/src/x86/ssse3.rs
@@ -11,7 +11,7 @@ use stdarch_test::assert_instr;
/// Computes the absolute value of packed 8-bit signed integers in `a` and
/// returns the unsigned results.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi8)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(pabsb))]
@@ -24,7 +24,7 @@ pub unsafe fn _mm_abs_epi8(a: __m128i) -> __m128i {
/// `a` and
/// returns the 16-bit unsigned integer
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(pabsw))]
@@ -37,7 +37,7 @@ pub unsafe fn _mm_abs_epi16(a: __m128i) -> __m128i {
/// `a` and
/// returns the 32-bit unsigned integer
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi32)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(pabsd))]
@@ -71,7 +71,7 @@ pub unsafe fn _mm_abs_epi32(a: __m128i) -> __m128i {
/// }
/// ```
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi8)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(pshufb))]
@@ -83,14 +83,14 @@ pub unsafe fn _mm_shuffle_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Concatenates 16-byte blocks in `a` and `b` into a 32-byte temporary result,
/// shifts the result right by `IMM8` bytes, and returns the low 16 bytes.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi8)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(palignr, IMM8 = 15))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_alignr_epi8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
// If palignr is shifting the pair of vectors more than the size of two
// lanes, emit zero.
if IMM8 > 32 {
@@ -113,10 +113,10 @@ pub unsafe fn _mm_alignr_epi8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
shift + i
}
}
- let r: i8x16 = simd_shuffle16!(
+ let r: i8x16 = simd_shuffle!(
b.as_i8x16(),
a.as_i8x16(),
- <const IMM8: i32> [
+ [
mask(IMM8 as u32, 0),
mask(IMM8 as u32, 1),
mask(IMM8 as u32, 2),
@@ -141,7 +141,7 @@ pub unsafe fn _mm_alignr_epi8<const IMM8: i32>(a: __m128i, b: __m128i) -> __m128
/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 128-bit vectors of `[8 x i16]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(phaddw))]
@@ -154,7 +154,7 @@ pub unsafe fn _mm_hadd_epi16(a: __m128i, b: __m128i) -> __m128i {
/// 128-bit vectors of `[8 x i16]`. Positive sums greater than 7FFFh are
/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadds_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(phaddsw))]
@@ -166,7 +166,7 @@ pub unsafe fn _mm_hadds_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 128-bit vectors of `[4 x i32]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi32)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(phaddd))]
@@ -178,7 +178,7 @@ pub unsafe fn _mm_hadd_epi32(a: __m128i, b: __m128i) -> __m128i {
/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 128-bit vectors of `[8 x i16]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(phsubw))]
@@ -192,7 +192,7 @@ pub unsafe fn _mm_hsub_epi16(a: __m128i, b: __m128i) -> __m128i {
/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
/// saturated to 8000h.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsubs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(phsubsw))]
@@ -204,7 +204,7 @@ pub unsafe fn _mm_hsubs_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 128-bit vectors of `[4 x i32]`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi32)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(phsubd))]
@@ -219,7 +219,7 @@ pub unsafe fn _mm_hsub_epi32(a: __m128i, b: __m128i) -> __m128i {
/// contiguous products with signed saturation, and writes the 16-bit sums to
/// the corresponding bits in the destination.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maddubs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(pmaddubsw))]
@@ -232,7 +232,7 @@ pub unsafe fn _mm_maddubs_epi16(a: __m128i, b: __m128i) -> __m128i {
/// product to the 18 most significant bits by right-shifting, round the
/// truncated value by adding 1, and write bits `[16:1]` to the destination.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhrs_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(pmulhrsw))]
@@ -246,7 +246,7 @@ pub unsafe fn _mm_mulhrs_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Elements in result are zeroed out when the corresponding element in `b`
/// is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_epi8)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi8)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(psignb))]
@@ -260,7 +260,7 @@ pub unsafe fn _mm_sign_epi8(a: __m128i, b: __m128i) -> __m128i {
/// Elements in result are zeroed out when the corresponding element in `b`
/// is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_epi16)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi16)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(psignw))]
@@ -274,7 +274,7 @@ pub unsafe fn _mm_sign_epi16(a: __m128i, b: __m128i) -> __m128i {
/// Elements in result are zeroed out when the corresponding element in `b`
/// is zero.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_epi32)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi32)
#[inline]
#[target_feature(enable = "ssse3")]
#[cfg_attr(test, assert_instr(psignd))]
diff --git a/library/stdarch/crates/core_arch/src/x86/test.rs b/library/stdarch/crates/core_arch/src/x86/test.rs
index bab89e61a..ec4298033 100644
--- a/library/stdarch/crates/core_arch/src/x86/test.rs
+++ b/library/stdarch/crates/core_arch/src/x86/test.rs
@@ -94,7 +94,7 @@ mod x86_polyfill {
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm_insert_epi64<const INDEX: i32>(a: __m128i, val: i64) -> __m128i {
- static_assert_imm1!(INDEX);
+ static_assert_uimm_bits!(INDEX, 1);
#[repr(C)]
union A {
a: __m128i,
@@ -108,7 +108,7 @@ mod x86_polyfill {
#[target_feature(enable = "avx2")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_insert_epi64<const INDEX: i32>(a: __m256i, val: i64) -> __m256i {
- static_assert_imm2!(INDEX);
+ static_assert_uimm_bits!(INDEX, 2);
#[repr(C)]
union A {
a: __m256i,
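
The renamed assertion spells out the accepted width: `static_assert_uimm_bits!(INDEX, 1)` compiles only for an unsigned value that fits in one bit (0 or 1), which the old `static_assert_imm1!` encoded in its name. A hedged call-site sketch against the stable SSE4.1 wrapper (assumes SSE4.1 support has been verified at runtime):

    use std::arch::x86_64::{__m128i, _mm_insert_epi64, _mm_setzero_si128};

    unsafe fn insert_demo() -> __m128i {
        let z = _mm_setzero_si128();
        // Compiles: lane index 1 fits in one unsigned bit.
        let v = _mm_insert_epi64::<1>(z, 42);
        // Rejected at const evaluation: 2 needs two bits.
        // let bad = _mm_insert_epi64::<2>(z, 42);
        v
    }
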
diff --git a/library/stdarch/crates/core_arch/src/x86/vaes.rs b/library/stdarch/crates/core_arch/src/x86/vaes.rs
index e09f8a113..dc24ae025 100644
--- a/library/stdarch/crates/core_arch/src/x86/vaes.rs
+++ b/library/stdarch/crates/core_arch/src/x86/vaes.rs
@@ -36,7 +36,7 @@ extern "C" {
/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenc_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesenc_epi128)
#[inline]
#[target_feature(enable = "vaes")]
#[cfg_attr(test, assert_instr(vaesenc))]
@@ -47,7 +47,7 @@ pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i {
/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenclast_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesenclast_epi128)
#[inline]
#[target_feature(enable = "vaes")]
#[cfg_attr(test, assert_instr(vaesenclast))]
@@ -58,7 +58,7 @@ pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256
/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdec_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesdec_epi128)
#[inline]
#[target_feature(enable = "vaes")]
#[cfg_attr(test, assert_instr(vaesdec))]
@@ -69,7 +69,7 @@ pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i {
/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdeclast_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_aesdeclast_epi128)
#[inline]
#[target_feature(enable = "vaes")]
#[cfg_attr(test, assert_instr(vaesdeclast))]
@@ -80,7 +80,7 @@ pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256
/// Performs one round of an AES encryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenc_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesenc_epi128)
#[inline]
#[target_feature(enable = "vaes,avx512f")]
#[cfg_attr(test, assert_instr(vaesenc))]
@@ -91,7 +91,7 @@ pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i {
/// Performs the last round of an AES encryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenclast_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesenclast_epi128)
#[inline]
#[target_feature(enable = "vaes,avx512f")]
#[cfg_attr(test, assert_instr(vaesenclast))]
@@ -102,7 +102,7 @@ pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512
/// Performs one round of an AES decryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdec_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesdec_epi128)
#[inline]
#[target_feature(enable = "vaes,avx512f")]
#[cfg_attr(test, assert_instr(vaesdec))]
@@ -113,7 +113,7 @@ pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i {
/// Performs the last round of an AES decryption flow on each 128-bit word (state) in `a` using
/// the corresponding 128-bit word (key) in `round_key`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdeclast_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_aesdeclast_epi128)
#[inline]
#[target_feature(enable = "vaes,avx512f")]
#[cfg_attr(test, assert_instr(vaesdeclast))]
diff --git a/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs
index ea76708b8..7a9769fb2 100644
--- a/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs
+++ b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs
@@ -30,14 +30,14 @@ extern "C" {
/// should be used. Immediate bits other than 0 and 4 are ignored.
/// All lanes share the immediate byte.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_clmulepi64_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_clmulepi64_epi128)
#[inline]
#[target_feature(enable = "vpclmulqdq,avx512f")]
// technically, according to Intel's documentation, we don't need avx512f here, but LLVM gets confused otherwise
#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm512_clmulepi64_epi128<const IMM8: i32>(a: __m512i, b: __m512i) -> __m512i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pclmulqdq_512(a, b, IMM8 as u8)
}
@@ -48,13 +48,13 @@ pub unsafe fn _mm512_clmulepi64_epi128<const IMM8: i32>(a: __m512i, b: __m512i)
/// should be used. Immediate bits other than 0 and 4 are ignored.
/// All lanes share the immediate byte.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_clmulepi64_epi128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_clmulepi64_epi128)
#[inline]
#[target_feature(enable = "vpclmulqdq")]
#[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn _mm256_clmulepi64_epi128<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
- static_assert_imm8!(IMM8);
+ static_assert_uimm_bits!(IMM8, 8);
pclmulqdq_256(a, b, IMM8 as u8)
}
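
Concretely, bit 0 of `IMM8` picks the low or high 64-bit half of each 128-bit lane of `a`, and bit 4 does the same for `b`, so only 0x00, 0x01, 0x10 and 0x11 name distinct products. A hedged call sketch (the intrinsic was nightly-only at the time of this patch; assumes VPCLMULQDQ support has been verified at runtime):

    use std::arch::x86_64::{__m256i, _mm256_clmulepi64_epi128};

    unsafe fn clmul_high_halves(a: __m256i, b: __m256i) -> __m256i {
        // 0x11: carry-less multiply the high 64-bit half of each lane of
        // `a` with the high half of the corresponding lane of `b`.
        _mm256_clmulepi64_epi128::<0x11>(a, b)
    }
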
diff --git a/library/stdarch/crates/core_arch/src/x86/xsave.rs b/library/stdarch/crates/core_arch/src/x86/xsave.rs
index 30f807e44..6cdddcfce 100644
--- a/library/stdarch/crates/core_arch/src/x86/xsave.rs
+++ b/library/stdarch/crates/core_arch/src/x86/xsave.rs
@@ -33,7 +33,7 @@ extern "C" {
/// The format of the XSAVE area is detailed in Section 13.4, “XSAVE Area,” of
/// Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsave)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsave)
#[inline]
#[target_feature(enable = "xsave")]
#[cfg_attr(test, assert_instr(xsave))]
@@ -49,7 +49,7 @@ pub unsafe fn _xsave(mem_addr: *mut u8, save_mask: u64) {
/// `mem_addr.HEADER.XSTATE_BV`. `mem_addr` must be aligned on a 64-byte
/// boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xrstor)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xrstor)
#[inline]
#[target_feature(enable = "xsave")]
#[cfg_attr(test, assert_instr(xrstor))]
@@ -69,7 +69,7 @@ pub const _XCR_XFEATURE_ENABLED_MASK: u32 = 0;
///
/// Currently only `XFEATURE_ENABLED_MASK` `XCR` is supported.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsetbv)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsetbv)
#[inline]
#[target_feature(enable = "xsave")]
#[cfg_attr(test, assert_instr(xsetbv))]
@@ -81,7 +81,7 @@ pub unsafe fn _xsetbv(a: u32, val: u64) {
/// Reads the contents of the extended control register `XCR`
/// specified in `xcr_no`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xgetbv)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xgetbv)
#[inline]
#[target_feature(enable = "xsave")]
#[cfg_attr(test, assert_instr(xgetbv))]
@@ -98,7 +98,7 @@ pub unsafe fn _xgetbv(xcr_no: u32) -> u64 {
/// the manner in which data is saved. The performance of this instruction will
/// be equal to or better than using the `XSAVE` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsaveopt)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsaveopt)
#[inline]
#[target_feature(enable = "xsave,xsaveopt")]
#[cfg_attr(test, assert_instr(xsaveopt))]
@@ -114,7 +114,7 @@ pub unsafe fn _xsaveopt(mem_addr: *mut u8, save_mask: u64) {
/// use init optimization. State is saved based on bits `[62:0]` in `save_mask`
/// and `XCR0`. `mem_addr` must be aligned on a 64-byte boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsavec)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsavec)
#[inline]
#[target_feature(enable = "xsave,xsavec")]
#[cfg_attr(test, assert_instr(xsavec))]
@@ -131,7 +131,7 @@ pub unsafe fn _xsavec(mem_addr: *mut u8, save_mask: u64) {
/// modified optimization. State is saved based on bits `[62:0]` in `save_mask`
/// and `XCR0`. `mem_addr` must be aligned on a 64-byte boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsaves)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsaves)
#[inline]
#[target_feature(enable = "xsave,xsaves")]
#[cfg_attr(test, assert_instr(xsaves))]
@@ -150,7 +150,7 @@ pub unsafe fn _xsaves(mem_addr: *mut u8, save_mask: u64) {
/// `mem_addr.HEADER.XSTATE_BV`. `mem_addr` must be aligned on a 64-byte
/// boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xrstors)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xrstors)
#[inline]
#[target_feature(enable = "xsave,xsaves")]
#[cfg_attr(test, assert_instr(xrstors))]
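
As a usage sketch for the alignment requirement repeated throughout this file: the save area must be 64-byte aligned, and its real size is CPU-dependent (production code queries CPUID leaf 0Dh). The 4096-byte buffer below is an illustrative over-allocation, not a documented bound:

    use std::arch::x86_64::_xsave;

    #[repr(C, align(64))]
    struct XsaveArea([u8; 4096]); // size illustrative; query CPUID in real code

    unsafe fn save_x87_and_sse(area: &mut XsaveArea) {
        // Bits 0 and 1 of the mask request x87 and SSE state respectively.
        _xsave(area.0.as_mut_ptr(), 0b11);
    }
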
diff --git a/library/stdarch/crates/core_arch/src/x86_64/abm.rs b/library/stdarch/crates/core_arch/src/x86_64/abm.rs
index 988074d67..251957dfd 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/abm.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/abm.rs
@@ -24,7 +24,7 @@ use stdarch_test::assert_instr;
///
/// When the operand is zero, it returns its size in bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_lzcnt_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_lzcnt_u64)
#[inline]
#[target_feature(enable = "lzcnt")]
#[cfg_attr(test, assert_instr(lzcnt))]
@@ -35,7 +35,7 @@ pub unsafe fn _lzcnt_u64(x: u64) -> u64 {
/// Counts the bits that are set.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_popcnt64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_popcnt64)
#[inline]
#[target_feature(enable = "popcnt")]
#[cfg_attr(test, assert_instr(popcnt))]
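
The zero-operand behavior called out above, pinned down with values (illustrative; assumes LZCNT/POPCNT support has been verified at runtime):

    use std::arch::x86_64::{_lzcnt_u64, _popcnt64};

    unsafe fn count_demo() {
        assert_eq!(_lzcnt_u64(0), 64); // zero operand returns its size in bits
        assert_eq!(_lzcnt_u64(1), 63);
        assert_eq!(_popcnt64(0b1011), 3); // three bits set
    }
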
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx.rs b/library/stdarch/crates/core_arch/src/x86_64/avx.rs
index 7ba26371c..f699f6164 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx.rs
@@ -21,14 +21,14 @@ use crate::{
/// Copies `a` to result, and inserts the 64-bit integer `i` into result
/// at the location specified by `INDEX`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_insert_epi64)
#[inline]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "avx")]
// This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_insert_epi64<const INDEX: i32>(a: __m256i, i: i64) -> __m256i {
- static_assert_imm2!(INDEX);
+ static_assert_uimm_bits!(INDEX, 2);
transmute(simd_insert(a.as_i64x4(), INDEX as u32, i))
}
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx2.rs b/library/stdarch/crates/core_arch/src/x86_64/avx2.rs
index 14447a137..3388568eb 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx2.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx2.rs
@@ -22,14 +22,14 @@ use crate::core_arch::{simd_llvm::*, x86::*};
/// Extracts a 64-bit integer from `a`, selected with `INDEX`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extract_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_extract_epi64)
#[inline]
#[target_feature(enable = "avx2")]
#[rustc_legacy_const_generics(1)]
// This intrinsic has no corresponding instruction.
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_extract_epi64<const INDEX: i32>(a: __m256i) -> i64 {
- static_assert_imm2!(INDEX);
+ static_assert_uimm_bits!(INDEX, 2);
simd_extract(a.as_i64x4(), INDEX as u32)
}
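
A round trip through the two const-generic lane accessors touched here; the index must fit in the asserted two bits (sketch; assumes AVX2 support has been verified at runtime):

    use std::arch::x86_64::{__m256i, _mm256_extract_epi64, _mm256_insert_epi64};

    unsafe fn lane_roundtrip(v: __m256i) -> i64 {
        // Lane index 2 is within 0..=3, so the 2-bit assertion passes.
        let v2 = _mm256_insert_epi64::<2>(v, 7);
        _mm256_extract_epi64::<2>(v2) // yields 7
    }
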
diff --git a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
index 5eed0502c..d414effa7 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/avx512f.rs
@@ -8,7 +8,7 @@ use stdarch_test::assert_instr;
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_i64&expand=1792)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_i64&expand=1792)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
@@ -18,7 +18,7 @@ pub unsafe fn _mm_cvtsd_i64(a: __m128d) -> i64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_i64&expand=1894)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_i64&expand=1894)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
@@ -28,7 +28,7 @@ pub unsafe fn _mm_cvtss_i64(a: __m128) -> i64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 64-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_u64&expand=1902)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_u64&expand=1902)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
@@ -38,7 +38,7 @@ pub unsafe fn _mm_cvtss_u64(a: __m128) -> u64 {
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_u64&expand=1800)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_u64&expand=1800)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
@@ -48,7 +48,7 @@ pub unsafe fn _mm_cvtsd_u64(a: __m128d) -> u64 {
/// Convert the signed 64-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_cvti32_ss&expand=1643)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvti64_ss&expand=1643)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2ss))]
@@ -60,7 +60,7 @@ pub unsafe fn _mm_cvti64_ss(a: __m128, b: i64) -> __m128 {
/// Convert the signed 64-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvti64_sd&expand=1644)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvti64_sd&expand=1644)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2sd))]
@@ -72,7 +72,7 @@ pub unsafe fn _mm_cvti64_sd(a: __m128d, b: i64) -> __m128d {
/// Convert the unsigned 64-bit integer b to a single-precision (32-bit) floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_ss&expand=2035)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu64_ss&expand=2035)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2ss))]
@@ -84,7 +84,7 @@ pub unsafe fn _mm_cvtu64_ss(a: __m128, b: u64) -> __m128 {
/// Convert the unsigned 64-bit integer b to a double-precision (64-bit) floating-point element, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sd&expand=2034)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtu64_sd&expand=2034)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2sd))]
@@ -96,7 +96,7 @@ pub unsafe fn _mm_cvtu64_sd(a: __m128d, b: u64) -> __m128d {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_i64&expand=2016)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_i64&expand=2016)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si))]
@@ -106,7 +106,7 @@ pub unsafe fn _mm_cvttsd_i64(a: __m128d) -> i64 {
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_u64&expand=2021)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_u64&expand=2021)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi))]
@@ -116,7 +116,7 @@ pub unsafe fn _mm_cvttsd_u64(a: __m128d) -> u64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=#text=_mm_cvttss_i64&expand=2023)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_i64&expand=2023)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si))]
@@ -126,7 +126,7 @@ pub unsafe fn _mm_cvttss_i64(a: __m128) -> i64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_u64&expand=2027)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_u64&expand=2027)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi))]
@@ -142,7 +142,7 @@ pub unsafe fn _mm_cvttss_u64(a: __m128) -> u64 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sd&expand=1313)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundi64_sd&expand=1313)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2sd, ROUNDING = 8))]
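
For the `ROUNDING` parameter documented above, a hedged call sketch; `_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC` is the round-to-nearest-and-suppress combination from the list (these AVX-512 intrinsics were nightly-only at the time of this patch):

    use std::arch::x86_64::*;

    unsafe fn cvt_round_nearest(a: __m128d, b: i64) -> __m128d {
        _mm_cvt_roundi64_sd::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b)
    }
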
@@ -162,7 +162,7 @@ pub unsafe fn _mm_cvt_roundi64_sd<const ROUNDING: i32>(a: __m128d, b: i64) -> __
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsi64_sd&expand=1367)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsi64_sd&expand=1367)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2sd, ROUNDING = 8))]
@@ -182,7 +182,7 @@ pub unsafe fn _mm_cvt_roundsi64_sd<const ROUNDING: i32>(a: __m128d, b: i64) -> _
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_ss&expand=1314)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundi64_ss&expand=1314)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))]
@@ -202,7 +202,7 @@ pub unsafe fn _mm_cvt_roundi64_ss<const ROUNDING: i32>(a: __m128, b: i64) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundu64_sd&expand=1379)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundu64_sd&expand=1379)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2sd, ROUNDING = 8))]
@@ -222,7 +222,7 @@ pub unsafe fn _mm_cvt_roundu64_sd<const ROUNDING: i32>(a: __m128d, b: u64) -> __
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsi64_ss&expand=1368)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsi64_ss&expand=1368)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsi2ss, ROUNDING = 8))]
@@ -242,7 +242,7 @@ pub unsafe fn _mm_cvt_roundsi64_ss<const ROUNDING: i32>(a: __m128, b: i64) -> __
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundu64_ss&expand=1380)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundu64_ss&expand=1380)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtusi2ss, ROUNDING = 8))]
@@ -262,7 +262,7 @@ pub unsafe fn _mm_cvt_roundu64_ss<const ROUNDING: i32>(a: __m128, b: u64) -> __m
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_si64&expand=1360)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsd_si64&expand=1360)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))]
@@ -282,7 +282,7 @@ pub unsafe fn _mm_cvt_roundsd_si64<const ROUNDING: i32>(a: __m128d) -> i64 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_i64&expand=1358)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsd_i64&expand=1358)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, ROUNDING = 8))]
@@ -302,7 +302,7 @@ pub unsafe fn _mm_cvt_roundsd_i64<const ROUNDING: i32>(a: __m128d) -> i64 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_u64&expand=1365)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundsd_u64&expand=1365)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi, ROUNDING = 8))]
@@ -322,7 +322,7 @@ pub unsafe fn _mm_cvt_roundsd_u64<const ROUNDING: i32>(a: __m128d) -> u64 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundss_si64&expand=1375)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundss_si64&expand=1375)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
@@ -342,7 +342,7 @@ pub unsafe fn _mm_cvt_roundss_si64<const ROUNDING: i32>(a: __m128) -> i64 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundss_i64&expand=1370)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundss_i64&expand=1370)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, ROUNDING = 8))]
@@ -362,7 +362,7 @@ pub unsafe fn _mm_cvt_roundss_i64<const ROUNDING: i32>(a: __m128) -> i64 {
/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions\
/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundss_u64&expand=1377)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_roundss_u64&expand=1377)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi, ROUNDING = 8))]
@@ -377,7 +377,7 @@ pub unsafe fn _mm_cvt_roundss_u64<const ROUNDING: i32>(a: __m128) -> u64 {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsd_si64&expand=1931)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_si64&expand=1931)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))]
@@ -392,7 +392,7 @@ pub unsafe fn _mm_cvtt_roundsd_si64<const SAE: i32>(a: __m128d) -> i64 {
/// Convert the lower double-precision (64-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsd_i64&expand=1929)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_i64&expand=1929)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2si, SAE = 8))]
@@ -407,7 +407,7 @@ pub unsafe fn _mm_cvtt_roundsd_i64<const SAE: i32>(a: __m128d) -> i64 {
/// Convert the lower double-precision (64-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsd_u64&expand=1933)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundsd_u64&expand=1933)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtsd2usi, SAE = 8))]
@@ -422,7 +422,7 @@ pub unsafe fn _mm_cvtt_roundsd_u64<const SAE: i32>(a: __m128d) -> u64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundss_i64&expand=1935)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundss_i64&expand=1935)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))]
@@ -437,7 +437,7 @@ pub unsafe fn _mm_cvtt_roundss_i64<const SAE: i32>(a: __m128) -> i64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to a 64-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundss_si64&expand=1937)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundss_si64&expand=1937)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2si, SAE = 8))]
@@ -452,7 +452,7 @@ pub unsafe fn _mm_cvtt_roundss_si64<const SAE: i32>(a: __m128) -> i64 {
/// Convert the lower single-precision (32-bit) floating-point element in a to an unsigned 64-bit integer with truncation, and store the result in dst.\
/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundss_u64&expand=1939)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_roundss_u64&expand=1939)
#[inline]
#[target_feature(enable = "avx512f")]
#[cfg_attr(test, assert_instr(vcvtss2usi, SAE = 8))]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/bmi.rs b/library/stdarch/crates/core_arch/src/x86_64/bmi.rs
index 9f71a8d38..3345b361c 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/bmi.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/bmi.rs
@@ -15,7 +15,7 @@ use stdarch_test::assert_instr;
/// Extracts bits in range [`start`, `start` + `len`) from `a` into
/// the least significant bits of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bextr_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bextr_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(bextr))]
@@ -31,7 +31,7 @@ pub unsafe fn _bextr_u64(a: u64, start: u32, len: u32) -> u64 {
/// Bits `[7,0]` of `control` specify the index to the first bit in the range
/// to be extracted, and bits `[15,8]` specify the length of the range.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bextr2_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bextr2_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(bextr))]
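
What `_bextr_u64` computes, spelled out with plain shifts as a reference (assumes `start < 64` and `start + len <= 64`, matching the half-open range above):

    fn bextr_reference(a: u64, start: u32, len: u32) -> u64 {
        let shifted = a >> start;
        // Mask off everything above the requested length.
        if len >= 64 { shifted } else { shifted & ((1u64 << len) - 1) }
    }
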
@@ -43,7 +43,7 @@ pub unsafe fn _bextr2_u64(a: u64, control: u64) -> u64 {
/// Bitwise logical `AND` of inverted `a` with `b`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_andn_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_andn_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(andn))]
@@ -54,7 +54,7 @@ pub unsafe fn _andn_u64(a: u64, b: u64) -> u64 {
/// Extracts lowest set isolated bit.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_blsi_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_blsi_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(blsi))]
@@ -66,7 +66,7 @@ pub unsafe fn _blsi_u64(x: u64) -> u64 {
/// Gets mask up to lowest set bit.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_blsmsk_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_blsmsk_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(blsmsk))]
@@ -80,7 +80,7 @@ pub unsafe fn _blsmsk_u64(x: u64) -> u64 {
///
/// If `x` is `0`, sets CF.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_blsr_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_blsr_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(blsr))]
@@ -94,7 +94,7 @@ pub unsafe fn _blsr_u64(x: u64) -> u64 {
///
/// When the source operand is `0`, it returns its size in bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_tzcnt_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_tzcnt_u64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(tzcnt))]
@@ -107,7 +107,7 @@ pub unsafe fn _tzcnt_u64(x: u64) -> u64 {
///
/// When the source operand is `0`, it returns its size in bits.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_tzcnt_64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_tzcnt_64)
#[inline]
#[target_feature(enable = "bmi1")]
#[cfg_attr(test, assert_instr(tzcnt))]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/bmi2.rs b/library/stdarch/crates/core_arch/src/x86_64/bmi2.rs
index 356d95a3d..efc252d8f 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/bmi2.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/bmi2.rs
@@ -18,7 +18,7 @@ use stdarch_test::assert_instr;
/// Unsigned multiplication of `a` with `b` returning a pair `(lo, hi)` with
/// the low half and the high half of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mulx_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mulx_u64)
#[inline]
#[cfg_attr(test, assert_instr(mul))]
#[target_feature(enable = "bmi2")]
@@ -32,7 +32,7 @@ pub unsafe fn _mulx_u64(a: u64, b: u64, hi: &mut u64) -> u64 {
/// Zeroes the bits of `a` at positions `index` and above.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bzhi_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bzhi_u64)
#[inline]
#[target_feature(enable = "bmi2")]
#[cfg_attr(test, assert_instr(bzhi))]
@@ -45,7 +45,7 @@ pub unsafe fn _bzhi_u64(a: u64, index: u32) -> u64 {
/// Scatters contiguous low-order bits of `a` to the result at the positions
/// specified by the `mask`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_pdep_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_pdep_u64)
#[inline]
#[target_feature(enable = "bmi2")]
#[cfg_attr(test, assert_instr(pdep))]
@@ -58,7 +58,7 @@ pub unsafe fn _pdep_u64(a: u64, mask: u64) -> u64 {
/// Gathers the bits of `x` specified by the `mask` into the contiguous low
/// order bit positions of the result.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_pext_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_pext_u64)
#[inline]
#[target_feature(enable = "bmi2")]
#[cfg_attr(test, assert_instr(pext))]
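
A worked pair showing that `_pext_u64` inverts `_pdep_u64` for the same mask (values illustrative; assumes BMI2 support has been verified at runtime):

    use std::arch::x86_64::{_pdep_u64, _pext_u64};

    unsafe fn scatter_gather_demo() {
        let a = 0b1011u64;         // bits to scatter, LSB first: 1, 1, 0, 1
        let mask = 0b1010_1010u64; // selected positions: 1, 3, 5, 7
        let scattered = _pdep_u64(a, mask);
        assert_eq!(scattered, 0b1000_1010);
        assert_eq!(_pext_u64(scattered, mask), a); // pext gathers them back
    }
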
diff --git a/library/stdarch/crates/core_arch/src/x86_64/bswap.rs b/library/stdarch/crates/core_arch/src/x86_64/bswap.rs
index 90a209ce3..62cd2948c 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/bswap.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/bswap.rs
@@ -7,7 +7,7 @@ use stdarch_test::assert_instr;
/// Returns an integer with the reversed byte order of `x`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_bswap64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_bswap64)
#[inline]
#[cfg_attr(test, assert_instr(bswap))]
#[stable(feature = "simd_x86", since = "1.27.0")]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs b/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs
index a262932af..a4fc0e732 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/cmpxchg16b.rs
@@ -16,10 +16,13 @@ use stdarch_test::assert_instr;
///
/// # Memory Orderings
///
-/// This atomic operations has the same semantics of memory orderings as
+/// This atomic operation has the same semantics of memory orderings as
/// `AtomicUsize::compare_exchange` does, only operating on 16 bytes of memory
/// instead of just a pointer.
///
+/// The failure ordering must be [`Ordering::SeqCst`], [`Ordering::Acquire`] or
+/// [`Ordering::Relaxed`].
+///
/// For more information on memory orderings here see the `compare_exchange`
/// documentation for other `Atomic*` types in the standard library.
///
@@ -33,15 +36,11 @@ use stdarch_test::assert_instr;
/// runtime to work correctly. If the CPU running the binary does not actually
/// support `cmpxchg16b` and the program enters an execution path that
eventually would reach this function, the behavior is undefined.
-///
-/// The failure ordering must be [`Ordering::SeqCst`], [`Ordering::Acquire`] or
-/// [`Ordering::Relaxed`], or this function call is undefined. See the `Atomic*`
-/// documentation's `compare_exchange` function for more information. When
-/// `compare_exchange` panics, this is undefined behavior. Currently this
-/// function aborts the process with an undefined instruction.
#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg_attr(test, assert_instr(cmpxchg16b, success = Ordering::SeqCst, failure = Ordering::SeqCst))]
#[target_feature(enable = "cmpxchg16b")]
+#[stable(feature = "cmpxchg16b_intrinsic", since = "1.67.0")]
pub unsafe fn cmpxchg16b(
dst: *mut u128,
old: u128,
@@ -53,6 +52,8 @@ pub unsafe fn cmpxchg16b(
debug_assert!(dst as usize % 16 == 0);
+ // Copied from `atomic_compare_exchange` in `core`.
+ // https://github.com/rust-lang/rust/blob/f8a2e49/library/core/src/sync/atomic.rs#L3046-L3079
let (val, _ok) = match (success, failure) {
(Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
(Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
@@ -69,11 +70,12 @@ pub unsafe fn cmpxchg16b(
(SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
(SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
(SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
+ (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
+ (_, Release) => panic!("there is no such thing as a release failure ordering"),
- // The above block is all copied from libcore, and this statement is
- // also copied from libcore except that it's a panic in libcore and we
- // have a little bit more of a lightweight panic here.
- _ => crate::core_arch::x86::ud2(),
+ // `atomic::Ordering` is non_exhaustive, so a catch-all arm is required; it warns as unreachable when `core_arch` is built as part of `core`.
+ #[allow(unreachable_patterns)]
+ (_, _) => unreachable!(),
};
val
}
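
A minimal compare-exchange loop over the now-stable intrinsic; the destination must be 16-byte aligned and the caller must have verified `cmpxchg16b` support, e.g. via `is_x86_feature_detected!` (sketch, not from the patch):

    use core::sync::atomic::Ordering;
    use std::arch::x86_64::cmpxchg16b;

    unsafe fn fetch_add_u128(dst: *mut u128, delta: u128) -> u128 {
        let mut cur = *dst; // non-atomic seed read; the CAS validates it
        loop {
            let new = cur.wrapping_add(delta);
            let prev = cmpxchg16b(dst, cur, new, Ordering::SeqCst, Ordering::SeqCst);
            if prev == cur {
                return prev; // exchange succeeded
            }
            cur = prev; // lost a race; retry with the observed value
        }
    }
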
diff --git a/library/stdarch/crates/core_arch/src/x86_64/fxsr.rs b/library/stdarch/crates/core_arch/src/x86_64/fxsr.rs
index d02702046..3f52f0d59 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/fxsr.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/fxsr.rs
@@ -22,7 +22,7 @@ extern "C" {
/// [fxsave]: http://www.felixcloutier.com/x86/FXSAVE.html
/// [fxrstor]: http://www.felixcloutier.com/x86/FXRSTOR.html
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_fxsave64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_fxsave64)
#[inline]
#[target_feature(enable = "fxsr")]
#[cfg_attr(test, assert_instr(fxsave64))]
@@ -46,7 +46,7 @@ pub unsafe fn _fxsave64(mem_addr: *mut u8) {
/// [fxsave]: http://www.felixcloutier.com/x86/FXSAVE.html
/// [fxrstor]: http://www.felixcloutier.com/x86/FXRSTOR.html
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_fxrstor64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_fxrstor64)
#[inline]
#[target_feature(enable = "fxsr")]
#[cfg_attr(test, assert_instr(fxrstor64))]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/macros.rs b/library/stdarch/crates/core_arch/src/x86_64/macros.rs
index a3ea0e821..17e1c257c 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/macros.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/macros.rs
@@ -1,36 +1,22 @@
//! Utility macros.
-// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
+// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is
// not a round number.
-pub(crate) struct ValidateConstRound<const IMM: i32>;
-impl<const IMM: i32> ValidateConstRound<IMM> {
- pub(crate) const VALID: () = {
- assert!(
- IMM == 4 || IMM == 8 || IMM == 9 || IMM == 10 || IMM == 11,
- "Invalid IMM value"
- );
- };
-}
-
#[allow(unused)]
macro_rules! static_assert_rounding {
($imm:ident) => {
- let _ = $crate::core_arch::x86_64::macros::ValidateConstRound::<$imm>::VALID;
+ static_assert!(
+ $imm == 4 || $imm == 8 || $imm == 9 || $imm == 10 || $imm == 11,
+ "Invalid IMM value"
+ )
};
}
-// Helper struct used to trigger const eval errors when the const generic immediate value `imm` is
+// Helper macro used to trigger const eval errors when the const generic immediate value `imm` is
// not a sae number.
-pub(crate) struct ValidateConstSae<const IMM: i32>;
-impl<const IMM: i32> ValidateConstSae<IMM> {
- pub(crate) const VALID: () = {
- assert!(IMM == 4 || IMM == 8, "Invalid IMM value");
- };
-}
-
#[allow(unused)]
macro_rules! static_assert_sae {
($imm:ident) => {
- let _ = $crate::core_arch::x86_64::macros::ValidateConstSae::<$imm>::VALID;
+ static_assert!($imm == 4 || $imm == 8, "Invalid IMM value")
};
}
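
The retired struct pattern worked by forcing const evaluation of an associated item; a standalone reproduction of that mechanism (names illustrative, not from the patch):

    // Referencing `VALID` for a concrete `IMM` evaluates the assertion at
    // compile time, failing the build for invalid values.
    struct ValidateEven<const IMM: i32>;
    impl<const IMM: i32> ValidateEven<IMM> {
        const VALID: () = assert!(IMM % 2 == 0, "IMM must be even");
    }

    fn requires_even<const IMM: i32>() {
        let _ = ValidateEven::<IMM>::VALID; // errors here when IMM is odd
    }
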
diff --git a/library/stdarch/crates/core_arch/src/x86_64/rdrand.rs b/library/stdarch/crates/core_arch/src/x86_64/rdrand.rs
index e5ec933fb..c5bb92975 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/rdrand.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/rdrand.rs
@@ -18,7 +18,7 @@ use stdarch_test::assert_instr;
/// Reads a hardware-generated 64-bit random value and stores the result in `val`.
/// Returns 1 if a random value was generated, and 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdrand64_step)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdrand64_step)
#[inline]
#[target_feature(enable = "rdrand")]
#[cfg_attr(test, assert_instr(rdrand))]
@@ -32,7 +32,7 @@ pub unsafe fn _rdrand64_step(val: &mut u64) -> i32 {
/// Reads a 64-bit NIST SP800-90B and SP800-90C compliant random value and stores
/// it in `val`. Returns 1 if a random value was generated, and 0 otherwise.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_rdseed64_step)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdseed64_step)
#[inline]
#[target_feature(enable = "rdseed")]
#[cfg_attr(test, assert_instr(rdseed))]
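
Because the status return can transiently be 0, a bounded retry loop is the usual pattern (illustrative; the retry count is a convention, not from the docs; assumes RDRAND support has been verified at runtime):

    use std::arch::x86_64::_rdrand64_step;

    unsafe fn hardware_rand64() -> Option<u64> {
        let mut v = 0u64;
        for _ in 0..10 {
            if _rdrand64_step(&mut v) == 1 {
                return Some(v);
            }
        }
        None // persistent hardware failure
    }
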
diff --git a/library/stdarch/crates/core_arch/src/x86_64/sse.rs b/library/stdarch/crates/core_arch/src/x86_64/sse.rs
index ca6799c90..cdadc277d 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/sse.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/sse.rs
@@ -25,7 +25,7 @@ extern "C" {
///
/// This corresponds to the `CVTSS2SI` instruction (with 64 bit output).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si64)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvtss2si))]
@@ -44,7 +44,7 @@ pub unsafe fn _mm_cvtss_si64(a: __m128) -> i64 {
///
/// This corresponds to the `CVTTSS2SI` instruction (with 64 bit output).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si64)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvttss2si))]
@@ -59,7 +59,7 @@ pub unsafe fn _mm_cvttss_si64(a: __m128) -> i64 {
/// This intrinsic corresponds to the `CVTSI2SS` instruction (with 64 bit
/// input).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_ss)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_ss)
#[inline]
#[target_feature(enable = "sse")]
#[cfg_attr(test, assert_instr(cvtsi2ss))]
diff --git a/library/stdarch/crates/core_arch/src/x86_64/sse2.rs b/library/stdarch/crates/core_arch/src/x86_64/sse2.rs
index f487a067f..bf2394eba 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/sse2.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/sse2.rs
@@ -19,7 +19,7 @@ extern "C" {
/// Converts the lower double-precision (64-bit) floating-point element in a to
/// a 64-bit integer.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsd2si))]
@@ -30,7 +30,7 @@ pub unsafe fn _mm_cvtsd_si64(a: __m128d) -> i64 {
/// Alias for `_mm_cvtsd_si64`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_si64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64x)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsd2si))]
@@ -42,7 +42,7 @@ pub unsafe fn _mm_cvtsd_si64x(a: __m128d) -> i64 {
/// Converts the lower double-precision (64-bit) floating-point element in `a`
/// to a 64-bit integer with truncation.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvttsd2si))]
@@ -53,7 +53,7 @@ pub unsafe fn _mm_cvttsd_si64(a: __m128d) -> i64 {
/// Alias for `_mm_cvttsd_si64`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64x)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvttsd2si))]
@@ -66,7 +66,7 @@ pub unsafe fn _mm_cvttsd_si64x(a: __m128d) -> i64 {
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(movnti))]
@@ -78,7 +78,7 @@ pub unsafe fn _mm_stream_si64(mem_addr: *mut i64, a: i64) {
/// Returns a vector whose lowest element is `a` and all higher elements are
/// `0`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(windows)), assert_instr(movq))]
@@ -90,7 +90,7 @@ pub unsafe fn _mm_cvtsi64_si128(a: i64) -> __m128i {
/// Returns a vector whose lowest element is `a` and all higher elements are
/// `0`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64x_si128)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_si128)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(windows)), assert_instr(movq))]
@@ -101,7 +101,7 @@ pub unsafe fn _mm_cvtsi64x_si128(a: i64) -> __m128i {
/// Returns the lowest element of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(windows)), assert_instr(movq))]
@@ -112,7 +112,7 @@ pub unsafe fn _mm_cvtsi128_si64(a: __m128i) -> i64 {
/// Returns the lowest element of `a`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64x)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64x)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(all(test, not(windows)), assert_instr(movq))]
@@ -124,7 +124,7 @@ pub unsafe fn _mm_cvtsi128_si64x(a: __m128i) -> i64 {
/// Returns `a` with its lower element replaced by `b` after converting it to
/// an `f64`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsi2sd))]
@@ -136,7 +136,7 @@ pub unsafe fn _mm_cvtsi64_sd(a: __m128d, b: i64) -> __m128d {
/// Returns `a` with its lower element replaced by `b` after converting it to
/// an `f64`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64x_sd)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_sd)
#[inline]
#[target_feature(enable = "sse2")]
#[cfg_attr(test, assert_instr(cvtsi2sd))]
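For context (not part of the patch): a minimal sketch of calling one of these
conversion intrinsics. SSE2 is part of the x86_64 baseline, so no run-time
feature check is needed on that target.

#[cfg(target_arch = "x86_64")]
fn truncate_lower(x: f64) -> i64 {
    use core::arch::x86_64::{_mm_cvttsd_si64, _mm_set_sd};
    // SAFETY: SSE2 is always available on x86_64, which satisfies the
    // intrinsic's target_feature requirement.
    unsafe { _mm_cvttsd_si64(_mm_set_sd(x)) } // e.g. 3.9 -> 3 (truncation)
}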
diff --git a/library/stdarch/crates/core_arch/src/x86_64/sse41.rs b/library/stdarch/crates/core_arch/src/x86_64/sse41.rs
index 3d1ea0cf6..d815a69a7 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/sse41.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/sse41.rs
@@ -10,28 +10,28 @@ use stdarch_test::assert_instr;
/// Extracts a 64-bit integer from `a` selected with `IMM1`
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(all(test, not(target_os = "windows")), assert_instr(pextrq, IMM1 = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_extract_epi64<const IMM1: i32>(a: __m128i) -> i64 {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
simd_extract(a.as_i64x2(), IMM1 as u32)
}
/// Returns a copy of `a` with the 64-bit integer from `i` inserted at a
/// location specified by `IMM1`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_epi64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi64)
#[inline]
#[target_feature(enable = "sse4.1")]
#[cfg_attr(test, assert_instr(pinsrq, IMM1 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_insert_epi64<const IMM1: i32>(a: __m128i, i: i64) -> __m128i {
- static_assert_imm1!(IMM1);
+ static_assert_uimm_bits!(IMM1, 1);
transmute(simd_insert(a.as_i64x2(), IMM1 as u32, i))
}
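The `static_assert_uimm_bits!(IMM1, 1)` change keeps the compile-time check
that the lane index fits in one bit (0 or 1). A usage sketch, not part of the
patch:

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse4.1")]
unsafe fn copy_high_to_low(a: core::arch::x86_64::__m128i) -> core::arch::x86_64::__m128i {
    use core::arch::x86_64::{_mm_extract_epi64, _mm_insert_epi64};
    let hi = _mm_extract_epi64::<1>(a); // IMM1 = 1: the upper 64-bit lane
    _mm_insert_epi64::<0>(a, hi) // write it into the lower lane
}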
diff --git a/library/stdarch/crates/core_arch/src/x86_64/sse42.rs b/library/stdarch/crates/core_arch/src/x86_64/sse42.rs
index 6b5d087c1..164def433 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/sse42.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/sse42.rs
@@ -12,7 +12,7 @@ extern "C" {
/// Starting with the initial value in `crc`, return the accumulated
/// CRC32-C value for unsigned 64-bit integer `v`.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_crc32_u64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u64)
#[inline]
#[target_feature(enable = "sse4.2")]
#[cfg_attr(test, assert_instr(crc32))]
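For context (not part of the patch): `_mm_crc32_u64` folds one 64-bit word
into a running CRC32-C value, so hashing a slice is a simple fold behind a
run-time feature check:

#[cfg(target_arch = "x86_64")]
fn crc32c(seed: u64, data: &[u64]) -> u64 {
    assert!(is_x86_feature_detected!("sse4.2"));
    // SAFETY: guarded by the run-time feature check above.
    data.iter()
        .fold(seed, |crc, &v| unsafe { core::arch::x86_64::_mm_crc32_u64(crc, v) })
}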
diff --git a/library/stdarch/crates/core_arch/src/x86_64/xsave.rs b/library/stdarch/crates/core_arch/src/x86_64/xsave.rs
index 2afd3e433..7a6ccda90 100644
--- a/library/stdarch/crates/core_arch/src/x86_64/xsave.rs
+++ b/library/stdarch/crates/core_arch/src/x86_64/xsave.rs
@@ -30,7 +30,7 @@ extern "C" {
/// The format of the XSAVE area is detailed in Section 13.4, “XSAVE Area,” of
/// Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 1.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsave64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsave64)
#[inline]
#[target_feature(enable = "xsave")]
#[cfg_attr(test, assert_instr(xsave64))]
@@ -46,7 +46,7 @@ pub unsafe fn _xsave64(mem_addr: *mut u8, save_mask: u64) {
/// `mem_addr.HEADER.XSTATE_BV`. `mem_addr` must be aligned on a 64-byte
/// boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xrstor64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xrstor64)
#[inline]
#[target_feature(enable = "xsave")]
#[cfg_attr(test, assert_instr(xrstor64))]
@@ -63,7 +63,7 @@ pub unsafe fn _xrstor64(mem_addr: *const u8, rs_mask: u64) {
/// the manner in which data is saved. The performance of this instruction will
/// be equal to or better than using the `XSAVE64` instruction.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsaveopt64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsaveopt64)
#[inline]
#[target_feature(enable = "xsave,xsaveopt")]
#[cfg_attr(test, assert_instr(xsaveopt64))]
@@ -79,7 +79,7 @@ pub unsafe fn _xsaveopt64(mem_addr: *mut u8, save_mask: u64) {
/// use init optimization. State is saved based on bits `[62:0]` in `save_mask`
/// and `XCR0`. `mem_addr` must be aligned on a 64-byte boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsavec64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsavec64)
#[inline]
#[target_feature(enable = "xsave,xsavec")]
#[cfg_attr(test, assert_instr(xsavec64))]
@@ -96,7 +96,7 @@ pub unsafe fn _xsavec64(mem_addr: *mut u8, save_mask: u64) {
/// modified optimization. State is saved based on bits `[62:0]` in `save_mask`
/// and `XCR0`. `mem_addr` must be aligned on a 64-byte boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xsaves64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xsaves64)
#[inline]
#[target_feature(enable = "xsave,xsaves")]
#[cfg_attr(test, assert_instr(xsaves64))]
@@ -115,7 +115,7 @@ pub unsafe fn _xsaves64(mem_addr: *mut u8, save_mask: u64) {
/// `mem_addr.HEADER.XSTATE_BV`. `mem_addr` must be aligned on a 64-byte
/// boundary.
///
-/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_xrstors64)
+/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_xrstors64)
#[inline]
#[target_feature(enable = "xsave,xsaves")]
#[cfg_attr(test, assert_instr(xrstors64))]
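A sketch of driving `_xsave64` (not part of the patch). The 4 KiB area size is
an assumption for illustration; real code should size the save area from CPUID
leaf 0Dh, and the buffer must be 64-byte aligned:

#[cfg(target_arch = "x86_64")]
#[repr(C, align(64))]
struct XsaveArea([u8; 4096]); // assumed size; query CPUID leaf 0Dh in practice

#[cfg(target_arch = "x86_64")]
fn save_extended_state() -> Option<Box<XsaveArea>> {
    if !is_x86_feature_detected!("xsave") {
        return None;
    }
    let mut area = Box::new(XsaveArea([0; 4096]));
    // SAFETY: feature checked above; the area is zeroed and 64-byte aligned.
    unsafe { core::arch::x86_64::_xsave64(area.0.as_mut_ptr(), u64::MAX) };
    Some(area)
}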
diff --git a/library/stdarch/crates/core_arch/tests/cpu-detection.rs b/library/stdarch/crates/core_arch/tests/cpu-detection.rs
deleted file mode 100644
index 08caca738..000000000
--- a/library/stdarch/crates/core_arch/tests/cpu-detection.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-#![feature(stdsimd)]
-#![allow(clippy::unwrap_used, clippy::print_stdout, clippy::use_debug)]
-
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-#[macro_use]
-extern crate std_detect;
-
-#[test]
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-fn x86_all() {
- println!("sse: {:?}", is_x86_feature_detected!("sse"));
- println!("sse2: {:?}", is_x86_feature_detected!("sse2"));
- println!("sse3: {:?}", is_x86_feature_detected!("sse3"));
- println!("ssse3: {:?}", is_x86_feature_detected!("ssse3"));
- println!("sse4.1: {:?}", is_x86_feature_detected!("sse4.1"));
- println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2"));
- println!("sse4a: {:?}", is_x86_feature_detected!("sse4a"));
- println!("avx: {:?}", is_x86_feature_detected!("avx"));
- println!("avx2: {:?}", is_x86_feature_detected!("avx2"));
- println!("avx512f {:?}", is_x86_feature_detected!("avx512f"));
- println!("avx512cd {:?}", is_x86_feature_detected!("avx512cd"));
- println!("avx512er {:?}", is_x86_feature_detected!("avx512er"));
- println!("avx512pf {:?}", is_x86_feature_detected!("avx512pf"));
- println!("avx512bw {:?}", is_x86_feature_detected!("avx512bw"));
- println!("avx512dq {:?}", is_x86_feature_detected!("avx512dq"));
- println!("avx512vl {:?}", is_x86_feature_detected!("avx512vl"));
- println!("avx512_ifma {:?}", is_x86_feature_detected!("avx512ifma"));
- println!("avx512_vbmi {:?}", is_x86_feature_detected!("avx512vbmi"));
- println!(
- "avx512_vpopcntdq {:?}",
- is_x86_feature_detected!("avx512vpopcntdq")
- );
- println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2"));
- println!("gfni {:?}", is_x86_feature_detected!("gfni"));
- println!("vaes {:?}", is_x86_feature_detected!("vaes"));
- println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq"));
- println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni"));
- println!(
- "avx512bitalg {:?}",
- is_x86_feature_detected!("avx512bitalg")
- );
- println!("avx512bf16 {:?}", is_x86_feature_detected!("avx512bf16"));
- println!(
- "avx512vp2intersect {:?}",
- is_x86_feature_detected!("avx512vp2intersect")
- );
- println!("f16c: {:?}", is_x86_feature_detected!("f16c"));
- println!("fma: {:?}", is_x86_feature_detected!("fma"));
- println!("abm: {:?}", is_x86_feature_detected!("abm"));
- println!("bmi: {:?}", is_x86_feature_detected!("bmi1"));
- println!("bmi2: {:?}", is_x86_feature_detected!("bmi2"));
- println!("tbm: {:?}", is_x86_feature_detected!("tbm"));
- println!("popcnt: {:?}", is_x86_feature_detected!("popcnt"));
- println!("lzcnt: {:?}", is_x86_feature_detected!("lzcnt"));
- println!("fxsr: {:?}", is_x86_feature_detected!("fxsr"));
- println!("xsave: {:?}", is_x86_feature_detected!("xsave"));
- println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt"));
- println!("xsaves: {:?}", is_x86_feature_detected!("xsaves"));
- println!("xsavec: {:?}", is_x86_feature_detected!("xsavec"));
-}
-
-#[test]
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-#[allow(deprecated)]
-fn x86_deprecated() {
- println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni"));
- println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes"));
- println!(
- "avx512vpclmulqdq {:?}",
- is_x86_feature_detected!("avx512vpclmulqdq")
- );
-}
diff --git a/library/stdarch/crates/intrinsic-test/Cargo.toml b/library/stdarch/crates/intrinsic-test/Cargo.toml
index 7efbab755..9b6162ab8 100644
--- a/library/stdarch/crates/intrinsic-test/Cargo.toml
+++ b/library/stdarch/crates/intrinsic-test/Cargo.toml
@@ -3,6 +3,7 @@ name = "intrinsic-test"
version = "0.1.0"
authors = ["Jamie Cunliffe <Jamie.Cunliffe@arm.com>"]
edition = "2021"
+license = "MIT OR Apache-2.0"
[dependencies]
lazy_static = "1.4.0"
diff --git a/library/stdarch/crates/intrinsic-test/LICENSE-APACHE b/library/stdarch/crates/intrinsic-test/LICENSE-APACHE
new file mode 100644
index 000000000..16fe87b06
--- /dev/null
+++ b/library/stdarch/crates/intrinsic-test/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/library/stdarch/crates/intrinsic-test/LICENSE-MIT b/library/stdarch/crates/intrinsic-test/LICENSE-MIT
new file mode 100644
index 000000000..ef223ae2c
--- /dev/null
+++ b/library/stdarch/crates/intrinsic-test/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2021-2023 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs b/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs
index 7336c9e8b..d21041676 100644
--- a/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs
+++ b/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs
@@ -6,9 +6,53 @@ use crate::argument::{Argument, ArgumentList, Constraint};
use crate::intrinsic::Intrinsic;
use crate::types::{IntrinsicType, TypeKind};
-pub fn get_acle_intrinsics(filename: &str) -> Vec<Intrinsic> {
+pub struct CsvMetadata {
+ notices: String,
+ spdx_lic: String,
+}
+
+impl CsvMetadata {
+ fn new<'a>(header: impl Iterator<Item = &'a str>) -> Self {
+ lazy_static! {
+ static ref SPDX_LICENSE_IDENTIFIER: Regex =
+ Regex::new(r#"SPDX-License-Identifier:(.*)"#).unwrap();
+ }
+
+ let notices = header.map(|line| format!("{line}\n")).collect::<String>();
+ let spdx_lic = match SPDX_LICENSE_IDENTIFIER
+ .captures_iter(&notices)
+ .exactly_one()
+ {
+ Ok(caps) => {
+ let cap = caps.get(1).unwrap().as_str().trim();
+ // Ensure that (unlikely) ACLE licence changes don't go unnoticed.
+ assert_eq!(cap, "Apache-2.0");
+ cap.to_string()
+ }
+ Err(caps_iter) => panic!(
+ "Expected exactly one SPDX-License-Identifier, found {}.",
+ caps_iter.count()
+ ),
+ };
+
+ Self { notices, spdx_lic }
+ }
+
+ pub fn spdx_license_identifier(&self) -> &str {
+ self.spdx_lic.as_str()
+ }
+
+ pub fn notices_lines(&self) -> impl Iterator<Item = &str> {
+ self.notices.lines()
+ }
+}
+
+pub fn get_acle_intrinsics(filename: &str) -> (CsvMetadata, Vec<Intrinsic>) {
let data = std::fs::read_to_string(filename).expect("Failed to open ACLE intrinsics file");
+ let comment_header = data.lines().map_while(|l| l.strip_prefix("<COMMENT>\t"));
+ let meta = CsvMetadata::new(comment_header);
+
let data = data
.lines()
.filter_map(|l| {
@@ -51,7 +95,7 @@ pub fn get_acle_intrinsics(filename: &str) -> Vec<Intrinsic> {
}
}
- intrinsics.to_vec()
+ (meta, intrinsics.to_vec())
}
impl Into<Intrinsic> for ACLEIntrinsicLine {
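A standalone sketch (not the crate's code) of the SPDX extraction that
`CsvMetadata::new` performs, assuming the `regex` crate; it enforces the same
"exactly one identifier" rule by returning `None` otherwise:

fn spdx_identifier(notices: &str) -> Option<String> {
    let re = regex::Regex::new(r"SPDX-License-Identifier:(.*)").unwrap();
    let mut captures = re.captures_iter(notices);
    let first = captures.next()?;
    if captures.next().is_some() {
        return None; // more than one identifier: ambiguous
    }
    Some(first.get(1)?.as_str().trim().to_string())
}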
diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs
index dac934574..5a29c4767 100644
--- a/library/stdarch/crates/intrinsic-test/src/main.rs
+++ b/library/stdarch/crates/intrinsic-test/src/main.rs
@@ -14,7 +14,7 @@ use itertools::Itertools;
use rayon::prelude::*;
use types::TypeKind;
-use crate::acle_csv_parser::get_acle_intrinsics;
+use crate::acle_csv_parser::{get_acle_intrinsics, CsvMetadata};
use crate::argument::Argument;
mod acle_csv_parser;
@@ -70,6 +70,7 @@ fn gen_code_c(
}
fn generate_c_program(
+ notices: &str,
header_files: &[&str],
intrinsic: &Intrinsic,
p64_armv7_workaround: bool,
@@ -81,7 +82,7 @@ fn generate_c_program(
.collect_vec();
format!(
- r#"{header_files}
+ r#"{notices}{header_files}
#include <iostream>
#include <cstring>
#include <iomanip>
@@ -157,7 +158,7 @@ fn gen_code_rust(intrinsic: &Intrinsic, constraints: &[&Argument], name: String)
}
}
-fn generate_rust_program(intrinsic: &Intrinsic, a32: bool) -> String {
+fn generate_rust_program(notices: &str, intrinsic: &Intrinsic, a32: bool) -> String {
let constraints = intrinsic
.arguments
.iter()
@@ -165,7 +166,7 @@ fn generate_rust_program(intrinsic: &Intrinsic, a32: bool) -> String {
.collect_vec();
format!(
- r#"#![feature(simd_ffi)]
+ r#"{notices}#![feature(simd_ffi)]
#![feature(link_llvm_intrinsics)]
#![feature(stdsimd)]
#![allow(overflowing_literals)]
@@ -217,7 +218,23 @@ fn compile_c(c_filename: &str, intrinsic: &Intrinsic, compiler: &str, a32: bool)
}
}
-fn build_c(intrinsics: &Vec<Intrinsic>, compiler: &str, a32: bool) -> bool {
+fn build_notices(csv_metadata: &CsvMetadata, line_prefix: &str) -> String {
+ let mut notices = format!(
+ "\
+{line_prefix}This is a transient test file, not intended for distribution. Some aspects of the
+{line_prefix}test are derived from a CSV specification, published with the following notices:
+{line_prefix}
+"
+ );
+ let lines = csv_metadata
+ .notices_lines()
+ .map(|line| format!("{line_prefix} {line}\n"));
+ notices.extend(lines);
+ notices.push_str("\n");
+ notices
+}
+
+fn build_c(notices: &str, intrinsics: &Vec<Intrinsic>, compiler: &str, a32: bool) -> bool {
let _ = std::fs::create_dir("c_programs");
intrinsics
.par_iter()
@@ -225,7 +242,7 @@ fn build_c(intrinsics: &Vec<Intrinsic>, compiler: &str, a32: bool) -> bool {
let c_filename = format!(r#"c_programs/{}.cpp"#, i.name);
let mut file = File::create(&c_filename).unwrap();
- let c_code = generate_c_program(&["arm_neon.h", "arm_acle.h"], &i, a32);
+ let c_code = generate_c_program(notices, &["arm_neon.h", "arm_acle.h"], &i, a32);
file.write_all(c_code.into_bytes().as_slice()).unwrap();
compile_c(&c_filename, &i, compiler, a32)
})
@@ -233,14 +250,20 @@ fn build_c(intrinsics: &Vec<Intrinsic>, compiler: &str, a32: bool) -> bool {
.is_none()
}
-fn build_rust(intrinsics: &Vec<Intrinsic>, toolchain: &str, a32: bool) -> bool {
+fn build_rust(
+ notices: &str,
+ spdx_lic: &str,
+ intrinsics: &Vec<Intrinsic>,
+ toolchain: &str,
+ a32: bool,
+) -> bool {
intrinsics.iter().for_each(|i| {
let rust_dir = format!(r#"rust_programs/{}"#, i.name);
let _ = std::fs::create_dir_all(&rust_dir);
let rust_filename = format!(r#"{rust_dir}/main.rs"#);
let mut file = File::create(&rust_filename).unwrap();
- let c_code = generate_rust_program(&i, a32);
+ let c_code = generate_rust_program(notices, &i, a32);
file.write_all(c_code.into_bytes().as_slice()).unwrap();
});
@@ -249,9 +272,10 @@ fn build_rust(intrinsics: &Vec<Intrinsic>, toolchain: &str, a32: bool) -> bool {
.write_all(
format!(
r#"[package]
-name = "intrinsic-test"
+name = "intrinsic-test-programs"
version = "{version}"
authors = ["{authors}"]
+license = "{spdx_lic}"
edition = "2018"
[workspace]
[dependencies]
@@ -371,7 +395,7 @@ fn main() {
};
let a32 = matches.is_present("A32");
- let intrinsics = get_acle_intrinsics(filename);
+ let (csv_metadata, intrinsics) = get_acle_intrinsics(filename);
let mut intrinsics = intrinsics
.into_iter()
@@ -394,11 +418,14 @@ fn main() {
.collect::<Vec<_>>();
intrinsics.dedup();
- if !build_c(&intrinsics, cpp_compiler, a32) {
+ let notices = build_notices(&csv_metadata, "// ");
+ let spdx_lic = csv_metadata.spdx_license_identifier();
+
+ if !build_c(&notices, &intrinsics, cpp_compiler, a32) {
std::process::exit(2);
}
- if !build_rust(&intrinsics, &toolchain, a32) {
+ if !build_rust(&notices, spdx_lic, &intrinsics, &toolchain, a32) {
std::process::exit(3);
}
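The notice block built by `build_notices` is just the CSV comment header
re-emitted behind a line prefix ("// " for both the generated C++ and Rust
files). A minimal standalone sketch with illustrative names, not the crate's
API:

fn prefix_lines(text: &str, prefix: &str) -> String {
    // Each source line becomes "<prefix> <line>\n" in the generated file.
    text.lines().map(|line| format!("{prefix} {line}\n")).collect()
}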
diff --git a/library/stdarch/crates/std_detect/README.md b/library/stdarch/crates/std_detect/README.md
index bea7d941a..71f474d65 100644
--- a/library/stdarch/crates/std_detect/README.md
+++ b/library/stdarch/crates/std_detect/README.md
@@ -30,6 +30,8 @@ run-time feature detection. When this feature is disabled, `std_detect` assumes
that [`getauxval`] is linked to the binary. If that is not the case, the behavior
is undefined.
+ Note: This feature is ignored on `*-linux-gnu*` targets, since all `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html)) have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html), and we can safely assume [`getauxval`] is linked to the binary.
+
* `std_detect_file_io` (enabled by default, requires `std`): Enable to perform run-time feature
detection using file APIs (e.g. `/proc/cpuinfo`, etc.) if other more performant
methods fail. This feature requires `libstd` as a dependency, preventing the
@@ -44,17 +46,25 @@ crate from working on applications in which `std` is not available.
the operating system. `std_detect` assumes that the binary is a user-space
application. If you need raw support for querying `cpuid`, consider using the
[`cupid`](https://crates.io/crates/cupid) crate.
-
-* Linux:
- * `arm{32, 64}`, `mips{32,64}{,el}`, `powerpc{32,64}{,le}`: `std_detect`
+
+* Linux/Android:
+ * `arm{32, 64}`, `mips{32,64}{,el}`, `powerpc{32,64}{,le}`, `riscv{32,64}`: `std_detect`
supports these on Linux by querying ELF auxiliary vectors (using `getauxval`
- when available), and if that fails, by querying `/proc/cpuinfo`.
+ when available), and if that fails, by querying `/proc/cpuinfo`.
* `arm64`: partial support for run-time feature detection by directly
querying `mrs` is implemented for Linux >= 4.11, but not enabled by default.
* FreeBSD:
+ * `arm32`, `powerpc64`: `std_detect` supports these on FreeBSD by querying ELF
+ auxiliary vectors using `sysctl`.
* `arm64`: run-time feature detection is implemented by directly querying `mrs`.
+* OpenBSD:
+ * `arm64`: run-time feature detection is implemented by querying `sysctl`.
+
+* Windows:
+ * `arm64`: run-time feature detection is implemented by querying `IsProcessorFeaturePresent`.
+
# License
This project is licensed under either of
diff --git a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs b/library/stdarch/crates/std_detect/src/detect/arch/x86.rs
index d0bf92d3e..828ac5c38 100644
--- a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs
+++ b/library/stdarch/crates/std_detect/src/detect/arch/x86.rs
@@ -91,6 +91,8 @@ features! {
/// * `"cmpxchg16b"`
/// * `"adx"`
/// * `"rtm"`
+ /// * `"movbe"`
+ /// * `"ermsb"`
///
/// [docs]: https://software.intel.com/sites/landingpage/IntrinsicsGuide
#[stable(feature = "simd_x86", since = "1.27.0")]
@@ -197,4 +199,8 @@ features! {
/// ADX, Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] rtm: "rtm";
/// RTM, Intel (Restricted Transactional Memory)
+ @FEATURE: #[stable(feature = "movbe_target_feature", since = "1.67.0")] movbe: "movbe";
+ /// MOVBE (Move Data After Swapping Bytes)
+ @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] ermsb: "ermsb";
+ /// ERMSB, Enhanced REP MOVSB and STOSB
}
diff --git a/library/stdarch/crates/std_detect/src/detect/mod.rs b/library/stdarch/crates/std_detect/src/detect/mod.rs
index 9a135c90a..db7018232 100644
--- a/library/stdarch/crates/std_detect/src/detect/mod.rs
+++ b/library/stdarch/crates/std_detect/src/detect/mod.rs
@@ -56,6 +56,12 @@ cfg_if! {
mod aarch64;
#[path = "os/freebsd/mod.rs"]
mod os;
+ } else if #[cfg(all(target_os = "openbsd", target_arch = "aarch64", feature = "libc"))] {
+ #[allow(dead_code)] // we don't use code that calls the mrs instruction.
+ #[path = "os/aarch64.rs"]
+ mod aarch64;
+ #[path = "os/openbsd/aarch64.rs"]
+ mod os;
} else if #[cfg(all(target_os = "windows", target_arch = "aarch64"))] {
#[path = "os/windows/aarch64.rs"]
mod os;
diff --git a/library/stdarch/crates/std_detect/src/detect/os/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/aarch64.rs
index e0e62ee33..5790f0168 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/aarch64.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/aarch64.rs
@@ -15,6 +15,7 @@
//!
//! - [Zircon implementation](https://fuchsia.googlesource.com/zircon/+/master/kernel/arch/arm64/feature.cpp)
//! - [Linux documentation](https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt)
+//! - [ARM documentation](https://developer.arm.com/documentation/ddi0601/2022-12/AArch64-Registers?lang=en)
use crate::detect::{cache, Feature};
use core::arch::asm;
@@ -23,40 +24,71 @@ use core::arch::asm;
///
/// This will cause SIGILL if the current OS is not trapping the mrs instruction.
pub(crate) fn detect_features() -> cache::Initializer {
- let mut value = cache::Initializer::default();
+ // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
+ let aa64isar0: u64;
+ unsafe {
+ asm!(
+ "mrs {}, ID_AA64ISAR0_EL1",
+ out(reg) aa64isar0,
+ options(pure, nomem, preserves_flags, nostack)
+ );
+ }
- {
- let mut enable_feature = |f, enable| {
- if enable {
- value.set(f as u32);
- }
- };
+ // ID_AA64ISAR1_EL1 - Instruction Set Attribute Register 1
+ let aa64isar1: u64;
+ unsafe {
+ asm!(
+ "mrs {}, ID_AA64ISAR1_EL1",
+ out(reg) aa64isar1,
+ options(pure, nomem, preserves_flags, nostack)
+ );
+ }
- // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
- let aa64isar0: u64;
- unsafe {
- asm!(
- "mrs {}, ID_AA64ISAR0_EL1",
- out(reg) aa64isar0,
- options(pure, nomem, preserves_flags, nostack)
- );
- }
+ // ID_AA64MMFR2_EL1 - AArch64 Memory Model Feature Register 2
+ let aa64mmfr2: u64;
+ unsafe {
+ asm!(
+ "mrs {}, ID_AA64MMFR2_EL1",
+ out(reg) aa64mmfr2,
+ options(pure, nomem, preserves_flags, nostack)
+ );
+ }
+
+ // ID_AA64PFR0_EL1 - Processor Feature Register 0
+ let aa64pfr0: u64;
+ unsafe {
+ asm!(
+ "mrs {}, ID_AA64PFR0_EL1",
+ out(reg) aa64pfr0,
+ options(pure, nomem, preserves_flags, nostack)
+ );
+ }
- enable_feature(Feature::pmull, bits_shift(aa64isar0, 7, 4) >= 2);
- enable_feature(Feature::tme, bits_shift(aa64isar0, 27, 24) == 1);
- enable_feature(Feature::lse, bits_shift(aa64isar0, 23, 20) >= 1);
- enable_feature(Feature::crc, bits_shift(aa64isar0, 19, 16) >= 1);
+ parse_system_registers(aa64isar0, aa64isar1, aa64mmfr2, Some(aa64pfr0))
+}
+
+pub(crate) fn parse_system_registers(
+ aa64isar0: u64,
+ aa64isar1: u64,
+ aa64mmfr2: u64,
+ aa64pfr0: Option<u64>,
+) -> cache::Initializer {
+ let mut value = cache::Initializer::default();
- // ID_AA64PFR0_EL1 - Processor Feature Register 0
- let aa64pfr0: u64;
- unsafe {
- asm!(
- "mrs {}, ID_AA64PFR0_EL1",
- out(reg) aa64pfr0,
- options(pure, nomem, preserves_flags, nostack)
- );
+ let mut enable_feature = |f, enable| {
+ if enable {
+ value.set(f as u32);
}
+ };
+ // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
+ enable_feature(Feature::pmull, bits_shift(aa64isar0, 7, 4) >= 2);
+ enable_feature(Feature::tme, bits_shift(aa64isar0, 27, 24) == 1);
+ enable_feature(Feature::lse, bits_shift(aa64isar0, 23, 20) >= 2);
+ enable_feature(Feature::crc, bits_shift(aa64isar0, 19, 16) >= 1);
+
+ // ID_AA64PFR0_EL1 - Processor Feature Register 0
+ if let Some(aa64pfr0) = aa64pfr0 {
let fp = bits_shift(aa64pfr0, 19, 16) < 0xF;
let fphp = bits_shift(aa64pfr0, 19, 16) >= 1;
let asimd = bits_shift(aa64pfr0, 23, 20) < 0xF;
@@ -77,23 +109,17 @@ pub(crate) fn detect_features() -> cache::Initializer {
asimd && bits_shift(aa64isar0, 47, 44) >= 1,
);
enable_feature(Feature::sve, asimd && bits_shift(aa64pfr0, 35, 32) >= 1);
+ }
- // ID_AA64ISAR1_EL1 - Instruction Set Attribute Register 1
- let aa64isar1: u64;
- unsafe {
- asm!(
- "mrs {}, ID_AA64ISAR1_EL1",
- out(reg) aa64isar1,
- options(pure, nomem, preserves_flags, nostack)
- );
- }
+ // ID_AA64ISAR1_EL1 - Instruction Set Attribute Register 1
+ // Check for either APA or API field
+ enable_feature(Feature::paca, bits_shift(aa64isar1, 11, 4) >= 1);
+ enable_feature(Feature::rcpc, bits_shift(aa64isar1, 23, 20) >= 1);
+ // Check for either GPA or GPI field
+ enable_feature(Feature::pacg, bits_shift(aa64isar1, 31, 24) >= 1);
- // Check for either APA or API field
- enable_feature(Feature::paca, bits_shift(aa64isar1, 11, 4) >= 1);
- enable_feature(Feature::rcpc, bits_shift(aa64isar1, 23, 20) >= 1);
- // Check for either GPA or GPI field
- enable_feature(Feature::pacg, bits_shift(aa64isar1, 31, 24) >= 1);
- }
+ // ID_AA64MMFR2_EL1 - AArch64 Memory Model Feature Register 2
+ enable_feature(Feature::lse2, bits_shift(aa64mmfr2, 35, 32) >= 1);
value
}
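For reference (not part of the patch): the field checks above extract a bit
range [high:low] from a system register value. This mirrors the crate-internal
`bits_shift` helper:

fn bits_shift(x: u64, high: u64, low: u64) -> u64 {
    (x >> low) & ((1u64 << (high - low + 1)) - 1)
}
// Example: LSE is enabled when ID_AA64ISAR0_EL1 bits [23:20] are >= 2.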
diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs
index 7d972b373..ccc48f536 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/freebsd/aarch64.rs
@@ -1,21 +1,3 @@
//! Run-time feature detection for Aarch64 on FreeBSD.
pub(crate) use super::super::aarch64::detect_features;
-
-#[cfg(test)]
-mod tests {
- #[test]
- fn dump() {
- println!("asimd: {:?}", is_aarch64_feature_detected!("asimd"));
- println!("pmull: {:?}", is_aarch64_feature_detected!("pmull"));
- println!("fp: {:?}", is_aarch64_feature_detected!("fp"));
- println!("fp16: {:?}", is_aarch64_feature_detected!("fp16"));
- println!("sve: {:?}", is_aarch64_feature_detected!("sve"));
- println!("crc: {:?}", is_aarch64_feature_detected!("crc"));
- println!("lse: {:?}", is_aarch64_feature_detected!("lse"));
- println!("rdm: {:?}", is_aarch64_feature_detected!("rdm"));
- println!("rcpc: {:?}", is_aarch64_feature_detected!("rcpc"));
- println!("dotprod: {:?}", is_aarch64_feature_detected!("dotprod"));
- println!("tme: {:?}", is_aarch64_feature_detected!("tme"));
- }
-}
diff --git a/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs b/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs
index 4c9d763b4..97ede1d26 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/freebsd/arm.rs
@@ -3,6 +3,15 @@
use super::auxvec;
use crate::detect::{cache, Feature};
+// Defined in machine/elf.h.
+// https://github.com/freebsd/freebsd-src/blob/deb63adf945d446ed91a9d84124c71f15ae571d1/sys/arm/include/elf.h
+const HWCAP_NEON: usize = 0x00001000;
+const HWCAP2_AES: usize = 0x00000001;
+const HWCAP2_PMULL: usize = 0x00000002;
+const HWCAP2_SHA1: usize = 0x00000004;
+const HWCAP2_SHA2: usize = 0x00000008;
+const HWCAP2_CRC32: usize = 0x00000010;
+
/// Try to read the features from the auxiliary vector
pub(crate) fn detect_features() -> cache::Initializer {
let mut value = cache::Initializer::default();
@@ -13,8 +22,17 @@ pub(crate) fn detect_features() -> cache::Initializer {
};
if let Ok(auxv) = auxvec::auxv() {
- enable_feature(&mut value, Feature::neon, auxv.hwcap & 0x00001000 != 0);
- enable_feature(&mut value, Feature::pmull, auxv.hwcap2 & 0x00000002 != 0);
+ enable_feature(&mut value, Feature::neon, auxv.hwcap & HWCAP_NEON != 0);
+ let pmull = auxv.hwcap2 & HWCAP2_PMULL != 0;
+ enable_feature(&mut value, Feature::pmull, pmull);
+ enable_feature(&mut value, Feature::crc, auxv.hwcap2 & HWCAP2_CRC32 != 0);
+ let aes = auxv.hwcap2 & HWCAP2_AES != 0;
+ enable_feature(&mut value, Feature::aes, aes);
+ // SHA2 requires SHA1 & SHA2 features
+ let sha1 = auxv.hwcap2 & HWCAP2_SHA1 != 0;
+ let sha2 = auxv.hwcap2 & HWCAP2_SHA2 != 0;
+ enable_feature(&mut value, Feature::sha2, sha1 && sha2);
+ enable_feature(&mut value, Feature::crypto, aes && pmull && sha1 && sha2);
return value;
}
value
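The combined `crypto` feature above requires all four HWCAP2 crypto bits at
once; a compact sketch of the same predicate, with the constants copied from
the patch:

const HWCAP2_ALL_CRYPTO: usize = 0x01 | 0x02 | 0x04 | 0x08; // AES|PMULL|SHA1|SHA2

fn has_crypto(hwcap2: usize) -> bool {
    hwcap2 & HWCAP2_ALL_CRYPTO == HWCAP2_ALL_CRYPTO
}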
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs
index a75185d43..caaa39f14 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs
@@ -6,14 +6,34 @@ use crate::detect::{bit, cache, Feature};
/// Try to read the features from the auxiliary vector, and if that fails, try
/// to read them from /proc/cpuinfo.
pub(crate) fn detect_features() -> cache::Initializer {
+ #[cfg(target_os = "android")]
+ let is_exynos9810 = {
+ // Samsung Exynos 9810 has a bug where big and little cores have different
+ // ISAs. On older Android (pre-9), the kernel incorrectly reports that
+ // features available only on some cores are available on all cores.
+ // https://reviews.llvm.org/D114523
+ let mut arch = [0_u8; libc::PROP_VALUE_MAX as usize];
+ let len = unsafe {
+ libc::__system_property_get(
+ b"ro.arch\0".as_ptr() as *const libc::c_char,
+ arch.as_mut_ptr() as *mut libc::c_char,
+ )
+ };
+ // On Exynos, ro.arch is not available on Android 12+, but it is fine
+ // because Android 9+ includes the fix.
+ len > 0 && arch.starts_with(b"exynos9810")
+ };
+ #[cfg(not(target_os = "android"))]
+ let is_exynos9810 = false;
+
if let Ok(auxv) = auxvec::auxv() {
let hwcap: AtHwcap = auxv.into();
- return hwcap.cache();
+ return hwcap.cache(is_exynos9810);
}
#[cfg(feature = "std_detect_file_io")]
if let Ok(c) = super::cpuinfo::CpuInfo::new() {
let hwcap: AtHwcap = c.into();
- return hwcap.cache();
+ return hwcap.cache(is_exynos9810);
}
cache::Initializer::default()
}
@@ -207,9 +227,9 @@ impl From<super::cpuinfo::CpuInfo> for AtHwcap {
impl AtHwcap {
/// Initializes the cache from the feature -bits.
///
- /// The feature dependencies here come directly from LLVM's feature definintions:
+ /// The feature dependencies here come directly from LLVM's feature definitions:
/// https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/AArch64/AArch64.td
- fn cache(self) -> cache::Initializer {
+ fn cache(self, is_exynos9810: bool) -> cache::Initializer {
let mut value = cache::Initializer::default();
{
let mut enable_feature = |f, enable| {
@@ -218,6 +238,25 @@ impl AtHwcap {
}
};
+ // Samsung Exynos 9810 has a bug where big and little cores have different
+ // ISAs. On older Android (pre-9), the kernel incorrectly reports that
+ // features available only on some cores are available on all cores.
+ // So, only check features that are known to be available on exynos-m3:
+ // $ rustc --print cfg --target aarch64-linux-android -C target-cpu=exynos-m3 | grep target_feature
+ // See also https://github.com/rust-lang/stdarch/pull/1378#discussion_r1103748342.
+ if is_exynos9810 {
+ enable_feature(Feature::fp, self.fp);
+ enable_feature(Feature::crc, self.crc32);
+ // ASIMD support requires float support - if half-floats are
+ // supported, it also requires half-float support:
+ let asimd = self.fp && self.asimd && (!self.fphp | self.asimdhp);
+ enable_feature(Feature::asimd, asimd);
+ // Cryptographic extensions require ASIMD
+ enable_feature(Feature::aes, self.aes && asimd);
+ enable_feature(Feature::sha2, self.sha1 && self.sha2 && asimd);
+ return value;
+ }
+
enable_feature(Feature::fp, self.fp);
// Half-float support requires float support
enable_feature(Feature::fp16, self.fp && self.fphp);
diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
index d9e7b28ea..11d9c103e 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs
@@ -52,7 +52,12 @@ pub(crate) struct AuxVec {
/// Note that run-time feature detection is not invoked for features that can
/// be detected at compile-time. Also note that if this function returns an
/// error, cpuinfo still can (and will) be used to try to perform run-time
-/// feature detecton on some platforms.
+/// feature detection on some platforms.
+///
+/// Note: The `std_detect_dlsym_getauxval` cargo feature is ignored on `*-linux-gnu*` targets,
+/// since all `*-linux-gnu*` targets ([since Rust 1.64](https://blog.rust-lang.org/2022/08/01/Increasing-glibc-kernel-requirements.html))
+/// have glibc requirements higher than [glibc 2.16 that added `getauxval`](https://sourceware.org/legacy-ml/libc-announce/2012/msg00000.html),
+/// and we can safely assume [`getauxval`] is linked to the binary.
///
/// For more information about when `getauxval` is available check the great
/// [`auxv` crate documentation][auxv_docs].
@@ -60,7 +65,10 @@ pub(crate) struct AuxVec {
/// [auxvec_h]: https://github.com/torvalds/linux/blob/master/include/uapi/linux/auxvec.h
/// [auxv_docs]: https://docs.rs/auxv/0.3.3/auxv/
pub(crate) fn auxv() -> Result<AuxVec, ()> {
- #[cfg(feature = "std_detect_dlsym_getauxval")]
+ #[cfg(all(
+ feature = "std_detect_dlsym_getauxval",
+ not(all(target_os = "linux", target_env = "gnu"))
+ ))]
{
// Try to call a dynamically-linked getauxval function.
if let Ok(hwcap) = getauxval(AT_HWCAP) {
@@ -101,7 +109,10 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
}
}
- #[cfg(not(feature = "std_detect_dlsym_getauxval"))]
+ #[cfg(any(
+ not(feature = "std_detect_dlsym_getauxval"),
+ all(target_os = "linux", target_env = "gnu")
+ ))]
{
// Targets with only AT_HWCAP:
#[cfg(any(
@@ -154,7 +165,10 @@ pub(crate) fn auxv() -> Result<AuxVec, ()> {
/// Tries to read the `key` from the auxiliary vector by calling the
/// dynamically-linked `getauxval` function. If the function is not linked,
/// this function return `Err`.
-#[cfg(feature = "std_detect_dlsym_getauxval")]
+#[cfg(all(
+ feature = "std_detect_dlsym_getauxval",
+ not(all(target_os = "linux", target_env = "gnu"))
+))]
fn getauxval(key: usize) -> Result<usize, ()> {
use libc;
pub type F = unsafe extern "C" fn(usize) -> usize;
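A sketch (not the crate's exact code) of the dlsym-based lookup that the
`std_detect_dlsym_getauxval` path performs, assuming the `libc` crate on
Linux; the cfg change above skips this path entirely on `*-linux-gnu*`:

#[cfg(target_os = "linux")]
unsafe fn dyn_getauxval(key: libc::c_ulong) -> Option<libc::c_ulong> {
    type GetAuxVal = unsafe extern "C" fn(libc::c_ulong) -> libc::c_ulong;
    let sym = libc::dlsym(libc::RTLD_DEFAULT, b"getauxval\0".as_ptr().cast());
    if sym.is_null() {
        return None; // getauxval is not linked into this binary
    }
    let getauxval: GetAuxVal = core::mem::transmute(sym);
    Some(getauxval(key))
}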
diff --git a/library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs
new file mode 100644
index 000000000..cfe4ad10a
--- /dev/null
+++ b/library/stdarch/crates/std_detect/src/detect/os/openbsd/aarch64.rs
@@ -0,0 +1,55 @@
+//! Run-time feature detection for Aarch64 on OpenBSD.
+//!
+//! OpenBSD doesn't trap the mrs instruction, but exposes the system registers through sysctl.
+//! https://github.com/openbsd/src/commit/d335af936b9d7dd9cf655cae1ce19560c45de6c8
+//! https://github.com/golang/go/commit/cd54ef1f61945459486e9eea2f016d99ef1da925
+
+use crate::detect::cache;
+use core::{mem::MaybeUninit, ptr};
+
+// Defined in machine/cpu.h.
+// https://github.com/openbsd/src/blob/72ccc03bd11da614f31f7ff76e3f6fce99bc1c79/sys/arch/arm64/include/cpu.h#L25-L40
+const CPU_ID_AA64ISAR0: libc::c_int = 2;
+const CPU_ID_AA64ISAR1: libc::c_int = 3;
+const CPU_ID_AA64MMFR2: libc::c_int = 7;
+const CPU_ID_AA64PFR0: libc::c_int = 8;
+
+/// Try to read the features from the system registers.
+pub(crate) fn detect_features() -> cache::Initializer {
+ // ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1 are supported on OpenBSD 7.1+.
+ // https://github.com/openbsd/src/commit/d335af936b9d7dd9cf655cae1ce19560c45de6c8
+ // Others are supported on OpenBSD 7.3+.
+ // https://github.com/openbsd/src/commit/c7654cd65262d532212f65123ee3905ba200365c
+ // sysctl returns an unsupported error if the operation is not supported,
+ // so we can safely use this function on older versions of OpenBSD.
+ let aa64isar0 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64ISAR0]).unwrap_or(0);
+ let aa64isar1 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64ISAR1]).unwrap_or(0);
+ let aa64mmfr2 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64MMFR2]).unwrap_or(0);
+ // Do not use unwrap_or(0) because in the fp and asimd fields, 0 indicates that
+ // the feature is available.
+ let aa64pfr0 = sysctl64(&[libc::CTL_MACHDEP, CPU_ID_AA64PFR0]);
+
+ super::aarch64::parse_system_registers(aa64isar0, aa64isar1, aa64mmfr2, aa64pfr0)
+}
+
+#[inline]
+fn sysctl64(mib: &[libc::c_int]) -> Option<u64> {
+ const OUT_LEN: libc::size_t = core::mem::size_of::<u64>();
+ let mut out = MaybeUninit::<u64>::uninit();
+ let mut out_len = OUT_LEN;
+ let res = unsafe {
+ libc::sysctl(
+ mib.as_ptr(),
+ mib.len() as libc::c_uint,
+ out.as_mut_ptr() as *mut libc::c_void,
+ &mut out_len,
+ ptr::null_mut(),
+ 0,
+ )
+ };
+ if res == -1 || out_len != OUT_LEN {
+ return None;
+ }
+ // SAFETY: we've checked that sysctl was successful and `out` was filled.
+ Some(unsafe { out.assume_init() })
+}
diff --git a/library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs
index 051ad6d1b..faded671c 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/windows/aarch64.rs
@@ -11,9 +11,14 @@ pub(crate) fn detect_features() -> cache::Initializer {
// The following Microsoft document isn't updated for aarch64.
// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-isprocessorfeaturepresent
// These are defined in winnt.h of the Windows SDK
+ const PF_ARM_VFP_32_REGISTERS_AVAILABLE: u32 = 18;
const PF_ARM_NEON_INSTRUCTIONS_AVAILABLE: u32 = 19;
const PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE: u32 = 30;
const PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE: u32 = 31;
+ const PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE: u32 = 34;
+ const PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE: u32 = 43;
+ const PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE: u32 = 44;
+ const PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE: u32 = 45;
extern "system" {
pub fn IsProcessorFeaturePresent(ProcessorFeature: DWORD) -> BOOL;
@@ -27,11 +32,15 @@ pub(crate) fn detect_features() -> cache::Initializer {
}
};
- // Some features such Feature::fp may be supported on current CPU,
+ // Some features may be supported on the current CPU,
// but there is no way to detect them through the OS API.
// Also, we require an unsafe block for the extern "system" calls.
unsafe {
enable_feature(
+ Feature::fp,
+ IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE) != FALSE,
+ );
+ enable_feature(
Feature::asimd,
IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) != FALSE,
);
@@ -39,20 +48,29 @@ pub(crate) fn detect_features() -> cache::Initializer {
Feature::crc,
IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) != FALSE,
);
- // PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE means aes, sha1, sha2 and
- // pmull support
enable_feature(
- Feature::aes,
- IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != FALSE,
+ Feature::lse,
+ IsProcessorFeaturePresent(PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
- Feature::pmull,
- IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != FALSE,
+ Feature::dotprod,
+ IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE) != FALSE,
);
enable_feature(
- Feature::sha2,
- IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != FALSE,
+ Feature::jsconv,
+ IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE) != FALSE,
);
+ enable_feature(
+ Feature::rcpc,
+ IsProcessorFeaturePresent(PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE) != FALSE,
+ );
+ // PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE means aes, sha1, sha2 and
+ // pmull support
+ let crypto =
+ IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != FALSE;
+ enable_feature(Feature::aes, crypto);
+ enable_feature(Feature::pmull, crypto);
+ enable_feature(Feature::sha2, crypto);
}
}
value
diff --git a/library/stdarch/crates/std_detect/src/detect/os/x86.rs b/library/stdarch/crates/std_detect/src/detect/os/x86.rs
index 08f48cd17..d8afc1aca 100644
--- a/library/stdarch/crates/std_detect/src/detect/os/x86.rs
+++ b/library/stdarch/crates/std_detect/src/detect/os/x86.rs
@@ -111,6 +111,7 @@ pub(crate) fn detect_features() -> cache::Initializer {
enable(proc_info_ecx, 13, Feature::cmpxchg16b);
enable(proc_info_ecx, 19, Feature::sse4_1);
enable(proc_info_ecx, 20, Feature::sse4_2);
+ enable(proc_info_ecx, 22, Feature::movbe);
enable(proc_info_ecx, 23, Feature::popcnt);
enable(proc_info_ecx, 25, Feature::aes);
enable(proc_info_ecx, 29, Feature::f16c);
@@ -128,6 +129,8 @@ pub(crate) fn detect_features() -> cache::Initializer {
enable(extended_features_ebx, 3, Feature::bmi1);
enable(extended_features_ebx, 8, Feature::bmi2);
+ enable(extended_features_ebx, 9, Feature::ermsb);
+
// `XSAVE` and `AVX` support:
let cpu_xsave = bit::test(proc_info_ecx as usize, 26);
if cpu_xsave {
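For reference (not part of the patch): the new MOVBE check reads CPUID leaf 1,
ECX bit 22, which is exactly what `enable(proc_info_ecx, 22, Feature::movbe)`
tests. A standalone sketch:

#[cfg(target_arch = "x86_64")]
fn cpuid_reports_movbe() -> bool {
    // SAFETY: CPUID is available on all x86_64 processors.
    let leaf1 = unsafe { core::arch::x86_64::__cpuid(1) };
    leaf1.ecx & (1 << 22) != 0
}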
diff --git a/library/stdarch/crates/std_detect/src/lib.rs b/library/stdarch/crates/std_detect/src/lib.rs
index c0e0de0dd..c0819218c 100644
--- a/library/stdarch/crates/std_detect/src/lib.rs
+++ b/library/stdarch/crates/std_detect/src/lib.rs
@@ -19,6 +19,9 @@
#![deny(clippy::missing_inline_in_public_items)]
#![cfg_attr(test, allow(unused_imports))]
#![no_std]
+// FIXME(Nilstrieb): Remove this once the compiler in stdarch CI has the internal_features lint.
+#![allow(unknown_lints)]
+#![allow(internal_features)]
#[cfg(test)]
#[macro_use]
diff --git a/library/stdarch/crates/std_detect/tests/cpu-detection.rs b/library/stdarch/crates/std_detect/tests/cpu-detection.rs
index 02ad77a63..eb3a3e409 100644
--- a/library/stdarch/crates/std_detect/tests/cpu-detection.rs
+++ b/library/stdarch/crates/std_detect/tests/cpu-detection.rs
@@ -20,8 +20,11 @@ fn all() {
}
#[test]
-#[cfg(all(target_arch = "arm", any(target_os = "linux", target_os = "android")))]
-fn arm_linux() {
+#[cfg(all(
+ target_arch = "arm",
+ any(target_os = "linux", target_os = "android", target_os = "freebsd"),
+))]
+fn arm_linux_or_freebsd() {
println!("neon: {}", is_arm_feature_detected!("neon"));
println!("pmull: {}", is_arm_feature_detected!("pmull"));
println!("crc: {}", is_arm_feature_detected!("crc"));
@@ -84,6 +87,45 @@ fn aarch64_linux() {
}
#[test]
+#[cfg(all(target_arch = "aarch64", target_os = "windows"))]
+fn aarch64_windows() {
+ println!("asimd: {:?}", is_aarch64_feature_detected!("asimd"));
+ println!("fp: {:?}", is_aarch64_feature_detected!("fp"));
+ println!("crc: {:?}", is_aarch64_feature_detected!("crc"));
+ println!("lse: {:?}", is_aarch64_feature_detected!("lse"));
+ println!("dotprod: {:?}", is_aarch64_feature_detected!("dotprod"));
+ println!("jsconv: {:?}", is_aarch64_feature_detected!("jsconv"));
+ println!("rcpc: {:?}", is_aarch64_feature_detected!("rcpc"));
+ println!("aes: {:?}", is_aarch64_feature_detected!("aes"));
+ println!("pmull: {:?}", is_aarch64_feature_detected!("pmull"));
+ println!("sha2: {:?}", is_aarch64_feature_detected!("sha2"));
+}
+
+#[test]
+#[cfg(all(
+ target_arch = "aarch64",
+ any(target_os = "freebsd", target_os = "openbsd")
+))]
+fn aarch64_bsd() {
+ println!("asimd: {:?}", is_aarch64_feature_detected!("asimd"));
+ println!("pmull: {:?}", is_aarch64_feature_detected!("pmull"));
+ println!("fp: {:?}", is_aarch64_feature_detected!("fp"));
+ println!("fp16: {:?}", is_aarch64_feature_detected!("fp16"));
+ println!("sve: {:?}", is_aarch64_feature_detected!("sve"));
+ println!("crc: {:?}", is_aarch64_feature_detected!("crc"));
+ println!("lse: {:?}", is_aarch64_feature_detected!("lse"));
+ println!("lse2: {:?}", is_aarch64_feature_detected!("lse2"));
+ println!("rdm: {:?}", is_aarch64_feature_detected!("rdm"));
+ println!("rcpc: {:?}", is_aarch64_feature_detected!("rcpc"));
+ println!("dotprod: {:?}", is_aarch64_feature_detected!("dotprod"));
+ println!("tme: {:?}", is_aarch64_feature_detected!("tme"));
+ println!("paca: {:?}", is_aarch64_feature_detected!("paca"));
+ println!("pacg: {:?}", is_aarch64_feature_detected!("pacg"));
+ println!("aes: {:?}", is_aarch64_feature_detected!("aes"));
+ println!("sha2: {:?}", is_aarch64_feature_detected!("sha2"));
+}
+
+#[test]
#[cfg(all(target_arch = "powerpc", target_os = "linux"))]
fn powerpc_linux() {
println!("altivec: {}", is_powerpc_feature_detected!("altivec"));
@@ -152,6 +194,7 @@ fn x86_all() {
println!("abm: {:?}", is_x86_feature_detected!("abm"));
println!("lzcnt: {:?}", is_x86_feature_detected!("lzcnt"));
println!("tbm: {:?}", is_x86_feature_detected!("tbm"));
+ println!("movbe: {:?}", is_x86_feature_detected!("movbe"));
println!("popcnt: {:?}", is_x86_feature_detected!("popcnt"));
println!("fxsr: {:?}", is_x86_feature_detected!("fxsr"));
println!("xsave: {:?}", is_x86_feature_detected!("xsave"));
@@ -159,3 +202,15 @@ fn x86_all() {
println!("xsaves: {:?}", is_x86_feature_detected!("xsaves"));
println!("xsavec: {:?}", is_x86_feature_detected!("xsavec"));
}
+
+#[test]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+#[allow(deprecated)]
+fn x86_deprecated() {
+ println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni"));
+ println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes"));
+ println!(
+ "avx512vpclmulqdq {:?}",
+ is_x86_feature_detected!("avx512vpclmulqdq")
+ );
+}
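The new tests only print or assert what the detection macros report; the usual caller-side pattern is to branch on the same macros for runtime dispatch. A minimal sketch, using feature names exercised in the tests above (the macro caches its result after the first query, so repeated calls are cheap):

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn simd_level() -> &'static str {
    // Runtime detection, independent of compile-time target features.
    if is_x86_feature_detected!("avx2") {
        "avx2"
    } else if is_x86_feature_detected!("sse4.2") {
        "sse4.2"
    } else {
        "baseline"
    }
}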
diff --git a/library/stdarch/crates/std_detect/tests/x86-specific.rs b/library/stdarch/crates/std_detect/tests/x86-specific.rs
index e481620c7..38512c758 100644
--- a/library/stdarch/crates/std_detect/tests/x86-specific.rs
+++ b/library/stdarch/crates/std_detect/tests/x86-specific.rs
@@ -20,6 +20,7 @@ fn dump() {
println!("sse4.2: {:?}", is_x86_feature_detected!("sse4.2"));
println!("sse4a: {:?}", is_x86_feature_detected!("sse4a"));
println!("sha: {:?}", is_x86_feature_detected!("sha"));
+ println!("f16c: {:?}", is_x86_feature_detected!("f16c"));
println!("avx: {:?}", is_x86_feature_detected!("avx"));
println!("avx2: {:?}", is_x86_feature_detected!("avx2"));
println!("avx512f {:?}", is_x86_feature_detected!("avx512f"));
@@ -64,6 +65,7 @@ fn dump() {
println!("cmpxchg16b: {:?}", is_x86_feature_detected!("cmpxchg16b"));
println!("adx: {:?}", is_x86_feature_detected!("adx"));
println!("rtm: {:?}", is_x86_feature_detected!("rtm"));
+ println!("movbe: {:?}", is_x86_feature_detected!("movbe"));
}
#[cfg(feature = "std_detect_env_override")]
@@ -108,6 +110,7 @@ fn compare_with_cupid() {
assert_eq!(is_x86_feature_detected!("sse4.2"), information.sse4_2());
assert_eq!(is_x86_feature_detected!("sse4a"), information.sse4a());
assert_eq!(is_x86_feature_detected!("sha"), information.sha());
+ assert_eq!(is_x86_feature_detected!("f16c"), information.f16c());
assert_eq!(is_x86_feature_detected!("avx"), information.avx());
assert_eq!(is_x86_feature_detected!("avx2"), information.avx2());
assert_eq!(is_x86_feature_detected!("avx512f"), information.avx512f());
@@ -152,4 +155,5 @@ fn compare_with_cupid() {
);
assert_eq!(is_x86_feature_detected!("adx"), information.adx(),);
assert_eq!(is_x86_feature_detected!("rtm"), information.rtm(),);
+ assert_eq!(is_x86_feature_detected!("movbe"), information.movbe(),);
}
diff --git a/library/stdarch/crates/stdarch-gen/neon.spec b/library/stdarch/crates/stdarch-gen/neon.spec
index 95fbc354c..f2c1e200d 100644
--- a/library/stdarch/crates/stdarch-gen/neon.spec
+++ b/library/stdarch/crates/stdarch-gen/neon.spec
@@ -14,7 +14,7 @@
// Sections start with EXACTLY three slashes followed
// by AT LEAST one space. Sections are used for two things:
//
-// 1) they serve as the doc comment for the given intrinics.
+// 1) they serve as the doc comment for the given intrinsics.
// 2) they reset all variables (name, fn, etc.)
//
// # Variables
@@ -29,16 +29,16 @@
// the function will exclusively be generated for
// aarch64.
// This is used to generate both aarch64 specific and
-// shared intrinics by first only specifying th aarch64
+// shared intrinsics by first only specifying the aarch64
// variant then the arm variant.
//
-// arm - The arm v7 intrinics used to checked for arm code
+// arm - The arm v7 intrinsics used to check for arm code
// generation. All neon functions available in arm are
-// also available in aarch64. If no aarch64 intrinic was
+// also available in aarch64. If no aarch64 intrinsic was
// set they are assumed to be the same.
-// Intrinics ending with a `.` will have a size suffixes
+// Intrinsics ending with a `.` will have a size suffix
// added (such as `i8` or `i64`) that is not sign specific
-// Intrinics ending with a `.s` will have a size suffixes
+// Intrinsics ending with a `.s` will have a size suffix
// added (such as `s8` or `u64`) that is sign specific
//
// a - First input for tests, it gets scaled to the size of
@@ -218,8 +218,8 @@ generate int32x2_t:int32x2_t:int64x2_t
/// Unsigned Absolute difference Long
name = vabdl_high
no-q
-multi_fn = simd_shuffle8!, c:uint8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
-multi_fn = simd_shuffle8!, d:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, c:uint8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, d:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_cast, {vabd_u8, c, d}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
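This entry — and the many below that change the same way — switches from the lane-count-suffixed macros (`simd_shuffle8!`, `simd_shuffle4!`, `simd_shuffle2!`) to a single `simd_shuffle!` that infers the lane count from the length of the index array. A sketch of what the generator plausibly emits for the entry above, assuming stdarch's internal macro and intrinsic names:

// High-half extraction for vabdl_high_u8: indices 8..=15 pick the upper
// eight lanes; the macro deduces the 8-lane output from the index array.
let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
simd_cast(vabd_u8(c, d))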
@@ -231,8 +231,8 @@ generate uint8x16_t:uint8x16_t:uint16x8_t
/// Unsigned Absolute difference Long
name = vabdl_high
no-q
-multi_fn = simd_shuffle4!, c:uint16x4_t, a, a, [4, 5, 6, 7]
-multi_fn = simd_shuffle4!, d:uint16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, c:uint16x4_t, a, a, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, d:uint16x4_t, b, b, [4, 5, 6, 7]
multi_fn = simd_cast, {vabd_u16, c, d}
a = 1, 2, 3, 4, 8, 9, 11, 12
b = 10, 10, 10, 10, 10, 10, 10, 10
@@ -244,8 +244,8 @@ generate uint16x8_t:uint16x8_t:uint32x4_t
/// Unsigned Absolute difference Long
name = vabdl_high
no-q
-multi_fn = simd_shuffle2!, c:uint32x2_t, a, a, [2, 3]
-multi_fn = simd_shuffle2!, d:uint32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, c:uint32x2_t, a, a, [2, 3]
+multi_fn = simd_shuffle!, d:uint32x2_t, b, b, [2, 3]
multi_fn = simd_cast, {vabd_u32, c, d}
a = 1, 2, 3, 4
b = 10, 10, 10, 10
@@ -257,8 +257,8 @@ generate uint32x4_t:uint32x4_t:uint64x2_t
/// Signed Absolute difference Long
name = vabdl_high
no-q
-multi_fn = simd_shuffle8!, c:int8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
-multi_fn = simd_shuffle8!, d:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, c:int8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, d:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_cast, e:uint8x8_t, {vabd_s8, c, d}
multi_fn = simd_cast, e
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
@@ -271,8 +271,8 @@ generate int8x16_t:int8x16_t:int16x8_t
/// Signed Absolute difference Long
name = vabdl_high
no-q
-multi_fn = simd_shuffle4!, c:int16x4_t, a, a, [4, 5, 6, 7]
-multi_fn = simd_shuffle4!, d:int16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, c:int16x4_t, a, a, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, d:int16x4_t, b, b, [4, 5, 6, 7]
multi_fn = simd_cast, e:uint16x4_t, {vabd_s16, c, d}
multi_fn = simd_cast, e
a = 1, 2, 3, 4, 9, 10, 11, 12
@@ -285,8 +285,8 @@ generate int16x8_t:int16x8_t:int32x4_t
/// Signed Absolute difference Long
name = vabdl_high
no-q
-multi_fn = simd_shuffle2!, c:int32x2_t, a, a, [2, 3]
-multi_fn = simd_shuffle2!, d:int32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, c:int32x2_t, a, a, [2, 3]
+multi_fn = simd_shuffle!, d:int32x2_t, b, b, [2, 3]
multi_fn = simd_cast, e:uint32x2_t, {vabd_s32, c, d}
multi_fn = simd_cast, e
a = 1, 2, 3, 4
@@ -978,7 +978,7 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in0_len-LANE2}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in0_len-LANE2}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = 0, MAX, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 0:1
@@ -995,7 +995,7 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in0_len-LANE2}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in0_len-LANE2}
a = 1., 2., 3., 4.
b = 0., 0.5, 0., 0.
n = 0:1
@@ -1010,8 +1010,8 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = simd_shuffle-in_len-!, a:in_t, a, a, {asc-0-in_len}
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in_len-LANE2}
+multi_fn = simd_shuffle!, a:in_t, a, a, {asc-0-in_len}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in_len-LANE2}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = 0, MAX, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 0:1
@@ -1028,8 +1028,8 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = simd_shuffle-in_len-!, a:in_t, a, a, {asc-0-in_len}
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in_len-LANE2}
+multi_fn = simd_shuffle!, a:in_t, a, a, {asc-0-in_len}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in_len-LANE2}
a = 1., 2., 3., 4.
b = 0., 0.5, 0., 0.
n = 0:1
@@ -1044,8 +1044,8 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = simd_shuffle-in0_len-!, b:in_t0, b, b, {asc-0-in0_len}
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in0_len-LANE2}
+multi_fn = simd_shuffle!, b:in_t0, b, b, {asc-0-in0_len}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in0_len-LANE2}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = 0, MAX, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 0:1
@@ -1062,8 +1062,8 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = simd_shuffle-in0_len-!, b:in_t0, b, b, {asc-0-in0_len}
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in0_len-LANE2}
+multi_fn = simd_shuffle!, b:in_t0, b, b, {asc-0-in0_len}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in0_len-LANE2}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = MAX, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 1:0
@@ -1078,8 +1078,8 @@ lane-suffixes
constn = LANE1:LANE2
multi_fn = static_assert_imm-in0_exp_len-LANE1
multi_fn = static_assert_imm-in_exp_len-LANE2
-multi_fn = simd_shuffle-in0_len-!, b:in_t0, b, b, {asc-0-in0_len}
-multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle-out_len-!, a, b, {ins-in0_len-in0_len-LANE2}
+multi_fn = simd_shuffle!, b:in_t0, b, b, {asc-0-in0_len}
+multi_fn = matchn-in0_exp_len-LANE1, simd_shuffle!, a, b, {ins-in0_len-in0_len-LANE2}
a = 1., 2., 3., 4.
b = 0.5, 0., 0., 0.
n = 1:0
@@ -1148,7 +1148,7 @@ generate float32x2_t:float64x2_t
/// Floating-point convert to higher precision long
name = vcvt_high
noq-double-suffixes
-multi_fn = simd_shuffle2!, b:float32x2_t, a, a, [2, 3]
+multi_fn = simd_shuffle!, b:float32x2_t, a, a, [2, 3]
multi_fn = simd_cast, b
a = -1.2, 1.2, 2.3, 3.4
validate 2.3f32 as f64, 3.4f32 as f64
@@ -1169,7 +1169,7 @@ generate float64x2_t:float32x2_t
/// Floating-point convert to lower precision narrow
name = vcvt_high
noq-double-suffixes
-multi_fn = simd_shuffle4!, a, {simd_cast, b}, [0, 1, 2, 3]
+multi_fn = simd_shuffle!, a, {simd_cast, b}, [0, 1, 2, 3]
a = -1.2, 1.2
b = -2.3, 3.4
validate -1.2, 1.2, -2.3f64 as f32, 3.4f64 as f32
@@ -1200,7 +1200,7 @@ generate f64:f32
/// Floating-point convert to lower precision narrow, rounding to odd
name = vcvtx_high
noq-double-suffixes
-multi_fn = simd_shuffle4!, a, {vcvtx-noq_doubleself-noext, b}, [0, 1, 2, 3]
+multi_fn = simd_shuffle!, a, {vcvtx-noq_doubleself-noext, b}, [0, 1, 2, 3]
a = -1.0, 2.0
b = -3.0, 4.0
validate -1.0, 2.0, -3.0, 4.0
@@ -1428,7 +1428,7 @@ name = vdup
lane-suffixes
constn = N
multi_fn = static_assert_imm-in_exp_len-N
-multi_fn = simd_shuffle-out_len-!, a, a, {dup-out_len-N as u32}
+multi_fn = simd_shuffle!, a, a, {dup-out_len-N as u32}
a = 1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16
n = HFLEN
validate 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
@@ -1454,7 +1454,7 @@ name = vdup
lane-suffixes
constn = N
multi_fn = static_assert_imm-in_exp_len-N
-multi_fn = simd_shuffle-out_len-!, a, a, {dup-out_len-N as u32}
+multi_fn = simd_shuffle!, a, a, {dup-out_len-N as u32}
a = 1, 1, 1, 4, 1, 6, 7, 8, 1, 10, 11, 12, 13, 14, 15, 16
n = HFLEN
validate 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
@@ -1468,7 +1468,7 @@ name = vdup
lane-suffixes
constn = N
multi_fn = static_assert_imm-in_exp_len-N
-multi_fn = simd_shuffle-out_len-!, a, a, {dup-out_len-N as u32}
+multi_fn = simd_shuffle!, a, a, {dup-out_len-N as u32}
a = 1., 1., 1., 4.
n = HFLEN
validate 1., 1., 1., 1.
@@ -1569,7 +1569,7 @@ generate float32x2_t:f32, float32x4_t:f32, float64x1_t:f64, float64x2_t:f64
name = vext
constn = N
multi_fn = static_assert_imm-out_exp_len-N
-multi_fn = matchn-out_exp_len-N, simd_shuffle-out_len-!, a, b, {asc-n-out_len}
+multi_fn = matchn-out_exp_len-N, simd_shuffle!, a, b, {asc-n-out_len}
a = 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
n = LEN_M1
@@ -1583,7 +1583,7 @@ generate int*_t, uint*_t, poly8x8_t, poly8x16_t, poly16x4_t, poly16x8_t
name = vext
constn = N
multi_fn = static_assert_imm-out_exp_len-N
-multi_fn = matchn-out_exp_len-N, simd_shuffle-out_len-!, a, b, {asc-n-out_len}
+multi_fn = matchn-out_exp_len-N, simd_shuffle!, a, b, {asc-n-out_len}
a = 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
n = LEN_M1
@@ -1599,7 +1599,7 @@ generate int64x2_t, uint64x2_t
name = vext
constn = N
multi_fn = static_assert_imm-out_exp_len-N
-multi_fn = matchn-out_exp_len-N, simd_shuffle-out_len-!, a, b, {asc-n-out_len}
+multi_fn = matchn-out_exp_len-N, simd_shuffle!, a, b, {asc-n-out_len}
a = 1., 1., 1., 1.
b = 2., 2., 2., 2.,
n = LEN_M1
@@ -1669,7 +1669,7 @@ name = vmla
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmla-self-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmla-self-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
c = 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -1688,7 +1688,7 @@ name = vmla
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmla-self-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmla-self-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 0., 1., 2., 3.
b = 2., 2., 2., 2.
c = 0., 3., 0., 0.
@@ -1743,7 +1743,7 @@ name = vmlal_lane
in2-suffix
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmlal-self-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmlal-self-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
c = 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -1761,8 +1761,8 @@ generate uint64x2_t:uint32x2_t:uint32x2_t:uint64x2_t, uint64x2_t:uint32x2_t:uint
/// Signed multiply-add long
name = vmlal_high
no-q
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, c:half, c, c, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, c:half, c, c, {fixed-half-right}
multi_fn = vmlal-noqself-noext, a, b, c
a = 8, 7, 6, 5, 4, 3, 2, 1
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
@@ -1776,8 +1776,8 @@ generate int16x8_t:int8x16_t:int8x16_t:int16x8_t, int32x4_t:int16x8_t:int16x8_t:
/// Unsigned multiply-add long
name = vmlal_high
no-q
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, c:half, c, c, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, c:half, c, c, {fixed-half-right}
multi_fn = vmlal-noqself-noext, a, b, c
a = 8, 7, 6, 5, 4, 3, 2, 1
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
@@ -1807,7 +1807,7 @@ name = vmlal_high_lane
in2-suffix
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmlal_high-noqself-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmlal_high-noqself-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 8, 7, 6, 5, 4, 3, 2, 1
b = 3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7
c = 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -1879,7 +1879,7 @@ name = vmls
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmls-self-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmls-self-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
c = 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -1898,7 +1898,7 @@ name = vmls
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmls-self-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmls-self-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 6., 7., 8., 9.
b = 2., 2., 2., 2.
c = 0., 3., 0., 0.
@@ -1953,7 +1953,7 @@ name = vmlsl_lane
in2-suffix
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmlsl-self-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmlsl-self-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
c = 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -1971,8 +1971,8 @@ generate uint64x2_t:uint32x2_t:uint32x2_t:uint64x2_t, uint64x2_t:uint32x2_t:uint
/// Signed multiply-subtract long
name = vmlsl_high
no-q
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, c:half, c, c, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, c:half, c, c, {fixed-half-right}
multi_fn = vmlsl-noqself-noext, a, b, c
a = 14, 15, 16, 17, 18, 19, 20, 21
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
@@ -1986,8 +1986,8 @@ generate int16x8_t:int8x16_t:int8x16_t:int16x8_t, int32x4_t:int16x8_t:int16x8_t:
/// Unsigned multiply-subtract long
name = vmlsl_high
no-q
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, c:half, c, c, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, c:half, c, c, {fixed-half-right}
multi_fn = vmlsl-noqself-noext, a, b, c
a = 14, 15, 16, 17, 18, 19, 20, 21
b = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
@@ -2017,7 +2017,7 @@ name = vmlsl_high_lane
in2-suffix
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = vmlsl_high-noqself-noext, a, b, {simd_shuffle-in_len-!, c, c, {dup-in_len-LANE as u32}}
+multi_fn = vmlsl_high-noqself-noext, a, b, {simd_shuffle!, c, c, {dup-in_len-LANE as u32}}
a = 14, 15, 16, 17, 18, 19, 20, 21
b = 3, 3, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7
c = 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -2035,7 +2035,7 @@ generate uint64x2_t:uint32x4_t:uint32x2_t:uint64x2_t, uint64x2_t:uint32x4_t:uint
name = vmovn_high
no-q
multi_fn = simd_cast, c:in_t0, b
-multi_fn = simd_shuffle-out_len-!, a, c, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, c, {asc-0-out_len}
a = 0, 1, 2, 3, 2, 3, 4, 5
b = 2, 3, 4, 5, 12, 13, 14, 15
validate 0, 1, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 12, 13, 14, 15
@@ -3483,7 +3483,7 @@ name = vsudot
out-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_dot-LANE
-multi_fn = simd_shuffle-in_len-!, c:unsigned, c, c, {base-4-LANE}
+multi_fn = simd_shuffle!, c:unsigned, c, c, {base-4-LANE}
multi_fn = vsudot-outlane-_, a, b, c
a = 1, 2, 1, 2
b = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
@@ -3567,7 +3567,7 @@ name = vmul
lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in_exp_len-LANE
-multi_fn = simd_mul, a, {simd_shuffle-out_len-!, b, b, {dup-out_len-LANE as u32}}
+multi_fn = simd_mul, a, {simd_shuffle!, b, b, {dup-out_len-LANE as u32}}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 1
@@ -3599,7 +3599,7 @@ name = vmul
lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in_exp_len-LANE
-multi_fn = simd_mul, a, {simd_shuffle-out_len-!, b, b, {dup-out_len-LANE as u32}}
+multi_fn = simd_mul, a, {simd_shuffle!, b, b, {dup-out_len-LANE as u32}}
a = 1., 2., 3., 4.
b = 2., 0., 0., 0.
n = 0
@@ -3652,8 +3652,8 @@ generate int8x8_t:int8x8_t:int16x8_t, int16x4_t:int16x4_t:int32x4_t, int32x2_t:i
/// Signed multiply long
name = vmull_high
no-q
-multi_fn = simd_shuffle-out_len-!, a:half, a, a, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, a:half, a, a, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
multi_fn = vmull-noqself-noext, a, b
a = 1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16
b = 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2
@@ -3678,8 +3678,8 @@ generate uint8x8_t:uint8x8_t:uint16x8_t, uint16x4_t:uint16x4_t:uint32x4_t, uint3
/// Unsigned multiply long
name = vmull_high
no-q
-multi_fn = simd_shuffle-out_len-!, a:half, a, a, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, a:half, a, a, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
multi_fn = vmull-noqself-noext, a, b
a = 1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16
b = 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2
@@ -3720,8 +3720,8 @@ generate p64:p64:p128
/// Polynomial multiply long
name = vmull_high
no-q
-multi_fn = simd_shuffle-out_len-!, a:half, a, a, {fixed-half-right}
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {fixed-half-right}
+multi_fn = simd_shuffle!, a:half, a, a, {fixed-half-right}
+multi_fn = simd_shuffle!, b:half, b, b, {fixed-half-right}
multi_fn = vmull-noqself-noext, a, b
a = 1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16
b = 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3
@@ -3761,7 +3761,7 @@ generate uint16x4_t:u16:uint32x4_t, uint32x2_t:u32:uint64x2_t
name = vmull_lane
constn = LANE
multi_fn = static_assert_imm-in_exp_len-LANE
-multi_fn = vmull-in0-noext, a, {simd_shuffle-in0_len-!, b, b, {dup-in0_len-LANE as u32}}
+multi_fn = vmull-in0-noext, a, {simd_shuffle!, b, b, {dup-in0_len-LANE as u32}}
a = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
b = 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 1
@@ -3792,7 +3792,7 @@ generate uint16x8_t:u16:uint32x4_t, uint32x4_t:u32:uint64x2_t
name = vmull_high_lane
constn = LANE
multi_fn = static_assert_imm-in_exp_len-LANE
-multi_fn = vmull_high-noqself-noext, a, {simd_shuffle-in0_len-!, b, b, {dup-in0_len-LANE as u32}}
+multi_fn = vmull_high-noqself-noext, a, {simd_shuffle!, b, b, {dup-in0_len-LANE as u32}}
a = 1, 2, 9, 10, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16
b = 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
n = 1
@@ -3834,7 +3834,7 @@ name = vmulx
lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in_exp_len-LANE
-multi_fn = vmulx-in0-noext, a, {simd_shuffle-in0_len-!, b, b, {dup-in0_len-LANE as u32}}
+multi_fn = vmulx-in0-noext, a, {simd_shuffle!, b, b, {dup-in0_len-LANE as u32}}
a = 1., 2., 3., 4.
b = 2., 0., 0., 0.
n = 0
@@ -4196,7 +4196,7 @@ generate uint16x8_t:uint8x8_t, uint32x4_t:uint16x4_t, uint64x2_t:uint32x2_t
name = vsubhn_high
no-q
multi_fn = vsubhn-noqself-noext, d:in_t0, b, c
-multi_fn = simd_shuffle-out_len-!, a, d, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, d, {asc-0-out_len}
a = MAX, 0, MAX, 0, MAX, 0, MAX, 0
b = MAX, 1, MAX, 1, MAX, 1, MAX, 1
c = 1, 0, 1, 0, 1, 0, 1, 0
@@ -4252,7 +4252,7 @@ generate uint16x8_t:uint8x8_t:uint16x8_t, uint32x4_t:uint16x4_t:uint32x4_t, uint
/// Signed Subtract Wide
name = vsubw_high
no-q
-multi_fn = simd_shuffle8!, c:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, c:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_sub, a, {simd_cast, c}
a = 8, 9, 10, 12, 13, 14, 15, 16
b = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16
@@ -4264,7 +4264,7 @@ generate int16x8_t:int8x16_t:int16x8_t
/// Signed Subtract Wide
name = vsubw_high
no-q
-multi_fn = simd_shuffle4!, c:int16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, c:int16x4_t, b, b, [4, 5, 6, 7]
multi_fn = simd_sub, a, {simd_cast, c}
a = 8, 9, 10, 11
b = 0, 1, 2, 3, 8, 9, 10, 11
@@ -4276,7 +4276,7 @@ generate int32x4_t:int16x8_t:int32x4_t
/// Signed Subtract Wide
name = vsubw_high
no-q
-multi_fn = simd_shuffle2!, c:int32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, c:int32x2_t, b, b, [2, 3]
multi_fn = simd_sub, a, {simd_cast, c}
a = 8, 9
b = 6, 7, 8, 9
@@ -4288,7 +4288,7 @@ generate int64x2_t:int32x4_t:int64x2_t
/// Unsigned Subtract Wide
name = vsubw_high
no-q
-multi_fn = simd_shuffle8!, c:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, c:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_sub, a, {simd_cast, c}
a = 8, 9, 10, 11, 12, 13, 14, 15
b = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
@@ -4300,7 +4300,7 @@ generate uint16x8_t:uint8x16_t:uint16x8_t
/// Unsigned Subtract Wide
name = vsubw_high
no-q
-multi_fn = simd_shuffle4!, c:uint16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, c:uint16x4_t, b, b, [4, 5, 6, 7]
multi_fn = simd_sub, a, {simd_cast, c}
a = 8, 9, 10, 11
b = 0, 1, 2, 3, 8, 9, 10, 11
@@ -4312,7 +4312,7 @@ generate uint32x4_t:uint16x8_t:uint32x4_t
/// Unsigned Subtract Wide
name = vsubw_high
no-q
-multi_fn = simd_shuffle2!, c:uint32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, c:uint32x2_t, b, b, [2, 3]
multi_fn = simd_sub, a, {simd_cast, c}
a = 8, 9
b = 6, 7, 8, 9
@@ -4354,9 +4354,9 @@ generate uint8x8_t:uint8x8_t:uint16x8_t, uint16x4_t:uint16x4_t:uint32x4_t, uint3
/// Signed Subtract Long
name = vsubl_high
no-q
-multi_fn = simd_shuffle8!, c:int8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, c:int8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_cast, d:out_t, c
-multi_fn = simd_shuffle8!, e:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, e:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_cast, f:out_t, e
multi_fn = simd_sub, d, f
@@ -4370,9 +4370,9 @@ generate int8x16_t:int8x16_t:int16x8_t
/// Signed Subtract Long
name = vsubl_high
no-q
-multi_fn = simd_shuffle4!, c:int16x4_t, a, a, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, c:int16x4_t, a, a, [4, 5, 6, 7]
multi_fn = simd_cast, d:out_t, c
-multi_fn = simd_shuffle4!, e:int16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, e:int16x4_t, b, b, [4, 5, 6, 7]
multi_fn = simd_cast, f:out_t, e
multi_fn = simd_sub, d, f
@@ -4386,9 +4386,9 @@ generate int16x8_t:int16x8_t:int32x4_t
/// Signed Subtract Long
name = vsubl_high
no-q
-multi_fn = simd_shuffle2!, c:int32x2_t, a, a, [2, 3]
+multi_fn = simd_shuffle!, c:int32x2_t, a, a, [2, 3]
multi_fn = simd_cast, d:out_t, c
-multi_fn = simd_shuffle2!, e:int32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, e:int32x2_t, b, b, [2, 3]
multi_fn = simd_cast, f:out_t, e
multi_fn = simd_sub, d, f
@@ -4402,9 +4402,9 @@ generate int32x4_t:int32x4_t:int64x2_t
/// Unsigned Subtract Long
name = vsubl_high
no-q
-multi_fn = simd_shuffle8!, c:uint8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, c:uint8x8_t, a, a, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_cast, d:out_t, c
-multi_fn = simd_shuffle8!, e:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, e:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = simd_cast, f:out_t, e
multi_fn = simd_sub, d, f
@@ -4418,9 +4418,9 @@ generate uint8x16_t:uint8x16_t:uint16x8_t
/// Unsigned Subtract Long
name = vsubl_high
no-q
-multi_fn = simd_shuffle4!, c:uint16x4_t, a, a, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, c:uint16x4_t, a, a, [4, 5, 6, 7]
multi_fn = simd_cast, d:out_t, c
-multi_fn = simd_shuffle4!, e:uint16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, e:uint16x4_t, b, b, [4, 5, 6, 7]
multi_fn = simd_cast, f:out_t, e
multi_fn = simd_sub, d, f
@@ -4434,9 +4434,9 @@ generate uint16x8_t:uint16x8_t:uint32x4_t
/// Unsigned Subtract Long
name = vsubl_high
no-q
-multi_fn = simd_shuffle2!, c:uint32x2_t, a, a, [2, 3]
+multi_fn = simd_shuffle!, c:uint32x2_t, a, a, [2, 3]
multi_fn = simd_cast, d:out_t, c
-multi_fn = simd_shuffle2!, e:uint32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, e:uint32x2_t, b, b, [2, 3]
multi_fn = simd_cast, f:out_t, e
multi_fn = simd_sub, d, f
@@ -4545,7 +4545,7 @@ name = vcmla
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_rot-LANE
-multi_fn = simd_shuffle-out_len-!, c:out_t, c, c, {base-2-LANE}
+multi_fn = simd_shuffle!, c:out_t, c, c, {base-2-LANE}
multi_fn = vcmla-self-noext, a, b, c
a = 1., -1., 1., -1.
b = -1., 1., -1., 1.
@@ -4563,7 +4563,7 @@ name = vcmla_rot90
rot-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_rot-LANE
-multi_fn = simd_shuffle-out_len-!, c:out_t, c, c, {base-2-LANE}
+multi_fn = simd_shuffle!, c:out_t, c, c, {base-2-LANE}
multi_fn = vcmla_rot90-rot-noext, a, b, c
a = 1., -1., 1., -1.
b = -1., 1., -1., 1.
@@ -4581,7 +4581,7 @@ name = vcmla_rot180
rot-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_rot-LANE
-multi_fn = simd_shuffle-out_len-!, c:out_t, c, c, {base-2-LANE}
+multi_fn = simd_shuffle!, c:out_t, c, c, {base-2-LANE}
multi_fn = vcmla_rot180-rot-noext, a, b, c
a = 1., -1., 1., -1.
b = -1., 1., -1., 1.
@@ -4599,7 +4599,7 @@ name = vcmla_rot270
rot-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_rot-LANE
-multi_fn = simd_shuffle-out_len-!, c:out_t, c, c, {base-2-LANE}
+multi_fn = simd_shuffle!, c:out_t, c, c, {base-2-LANE}
multi_fn = vcmla_rot270-rot-noext, a, b, c
a = 1., -1., 1., -1.
b = -1., 1., -1., 1.
@@ -4634,7 +4634,7 @@ name = vdot
out-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_dot-LANE
-multi_fn = simd_shuffle-in_len-!, c:in_t, c, c, {base-4-LANE}
+multi_fn = simd_shuffle!, c:in_t, c, c, {base-4-LANE}
multi_fn = vdot-out-noext, a, b, c
a = 1, 2, 1, 2
b = 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8
@@ -4820,7 +4820,7 @@ generate float32x4_t:f32
/// Vector move
name = vmovl_high
no-q
-multi_fn = simd_shuffle-out_len-!, a:half, a, a, {asc-halflen-halflen}
+multi_fn = simd_shuffle!, a:half, a, a, {asc-halflen-halflen}
multi_fn = vmovl-noqself-noext, a
a = 1, 2, 3, 4, 3, 4, 5, 6, 3, 4, 5, 6, 7, 8, 9, 10
validate 3, 4, 5, 6, 7, 8, 9, 10
@@ -4949,8 +4949,8 @@ generate int16x4_t:i16:int32x4_t, int32x2_t:i32:int64x2_t
/// Signed saturating doubling multiply long
name = vqdmull_high
no-q
-multi_fn = simd_shuffle-out_len-!, a:half, a, a, {asc-halflen-halflen}
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {asc-halflen-halflen}
+multi_fn = simd_shuffle!, a:half, a, a, {asc-halflen-halflen}
+multi_fn = simd_shuffle!, b:half, b, b, {asc-halflen-halflen}
multi_fn = vqdmull-noqself-noext, a, b
a = 0, 1, 4, 5, 4, 5, 6, 7
b = 1, 2, 5, 6, 5, 6, 7, 8
@@ -4962,7 +4962,7 @@ generate int16x8_t:int16x8_t:int32x4_t, int32x4_t:int32x4_t:int64x2_t
/// Signed saturating doubling multiply long
name = vqdmull_high_n
no-q
-multi_fn = simd_shuffle-out_len-!, a:in_ntt, a, a, {asc-out_len-out_len}
+multi_fn = simd_shuffle!, a:in_ntt, a, a, {asc-out_len-out_len}
multi_fn = vdup_n-in_ntt-noext, b:in_ntt, b
multi_fn = vqdmull-in_ntt-noext, a, b
a = 0, 2, 8, 10, 8, 10, 12, 14
@@ -4976,7 +4976,7 @@ generate int16x8_t:i16:int32x4_t, int32x4_t:i32:int64x2_t
name = vqdmull_lane
constn = N
multi_fn = static_assert_imm-in_exp_len-N
-multi_fn = simd_shuffle-out_len-!, b:in_t0, b, b, {dup-out_len-N as u32}
+multi_fn = simd_shuffle!, b:in_t0, b, b, {dup-out_len-N as u32}
multi_fn = vqdmull-noqself-noext, a, b
a = 1, 2, 3, 4
b = 0, 2, 2, 0, 2, 0, 0, 0
@@ -5021,8 +5021,8 @@ generate i32:int32x2_t:i64, i32:int32x4_t:i64
name = vqdmull_high_lane
constn = N
multi_fn = static_assert_imm-in_exp_len-N
-multi_fn = simd_shuffle-out_len-!, a:in_t, a, a, {asc-out_len-out_len}
-multi_fn = simd_shuffle-out_len-!, b:in_t, b, b, {dup-out_len-N as u32}
+multi_fn = simd_shuffle!, a:in_t, a, a, {asc-out_len-out_len}
+multi_fn = simd_shuffle!, b:in_t, b, b, {dup-out_len-N as u32}
multi_fn = vqdmull-self-noext, a, b
a = 0, 1, 4, 5, 4, 5, 6, 7
b = 0, 2, 2, 0, 2, 0, 0, 0
@@ -5036,8 +5036,8 @@ generate int16x8_t:int16x4_t:int32x4_t, int32x4_t:int32x2_t:int64x2_t
name = vqdmull_high_lane
constn = N
multi_fn = static_assert_imm-in_exp_len-N
-multi_fn = simd_shuffle-out_len-!, a:half, a, a, {asc-out_len-out_len}
-multi_fn = simd_shuffle-out_len-!, b:half, b, b, {dup-out_len-N as u32}
+multi_fn = simd_shuffle!, a:half, a, a, {asc-out_len-out_len}
+multi_fn = simd_shuffle!, b:half, b, b, {dup-out_len-N as u32}
multi_fn = vqdmull-noqself-noext, a, b
a = 0, 1, 4, 5, 4, 5, 6, 7
b = 0, 2, 2, 0, 2, 0, 0, 0
@@ -5418,7 +5418,7 @@ generate u64:u32
/// Signed saturating extract narrow
name = vqmovn_high
no-q
-multi_fn = simd_shuffle-out_len-!, a, {vqmovn-noqself-noext, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqmovn-noqself-noext, b}, {asc-0-out_len}
a = MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX
b = MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX
validate MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX
@@ -5452,7 +5452,7 @@ generate i16:u8, i32:u16, i64:u32
/// Signed saturating extract unsigned narrow
name = vqmovun_high
no-q
-multi_fn = simd_shuffle-out_len-!, a, {vqmovun-noqself-noext, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqmovun-noqself-noext, b}, {asc-0-out_len}
a = 0, 0, 0, 0, 0, 0, 0, 0
b = -1, -1, -1, -1, -1, -1, -1, -1
validate 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -5499,7 +5499,7 @@ name = vqrdmulh
lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in_exp_len-LANE
-multi_fn = simd_shuffle-out_len-!, b:out_t, b, b, {dup-out_len-LANE as u32}
+multi_fn = simd_shuffle!, b:out_t, b, b, {dup-out_len-LANE as u32}
multi_fn = vqrdmulh-out-noext, a, b
a = MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX
b = 0, 2, 0, 0, 0, 0, 0, 0,
@@ -5557,7 +5557,7 @@ name = vqrdmlah
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = simd_shuffle-out_len-!, c:out_t, c, c, {dup-out_len-LANE as u32}
+multi_fn = simd_shuffle!, c:out_t, c, c, {dup-out_len-LANE as u32}
multi_fn = vqrdmlah-out-noext, a, b, c
a = 1, 1, 1, 1, 1, 1, 1, 1
b = MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX
@@ -5618,7 +5618,7 @@ name = vqrdmlsh
in2-lane-suffixes
constn = LANE
multi_fn = static_assert_imm-in2_exp_len-LANE
-multi_fn = simd_shuffle-out_len-!, c:out_t, c, c, {dup-out_len-LANE as u32}
+multi_fn = simd_shuffle!, c:out_t, c, c, {dup-out_len-LANE as u32}
multi_fn = vqrdmlsh-out-noext, a, b, c
a = 1, 1, 1, 1, 1, 1, 1, 1
b = MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX
@@ -5740,7 +5740,7 @@ name = vqrshrn_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vqrshrn_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqrshrn_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 2, 3, 2, 3, 6, 7
b = 8, 12, 24, 28, 48, 52, 56, 60
n = 2
@@ -5787,7 +5787,7 @@ name = vqrshrn_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vqrshrn_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqrshrn_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 2, 3, 2, 3, 6, 7
b = 8, 12, 24, 28, 48, 52, 56, 60
n = 2
@@ -5834,7 +5834,7 @@ name = vqrshrun_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vqrshrun_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqrshrun_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 2, 3, 2, 3, 6, 7
b = 8, 12, 24, 28, 48, 52, 56, 60
n = 2
@@ -6020,7 +6020,7 @@ name = vqshrn_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vqshrn_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqshrn_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 8, 9, 8, 9, 10, 11
b = 32, 36, 40, 44, 48, 52, 56, 60
n = 2
@@ -6067,7 +6067,7 @@ name = vqshrn_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vqshrn_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqshrn_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 8, 9, 8, 9, 10, 11
b = 32, 36, 40, 44, 48, 52, 56, 60
n = 2
@@ -6113,7 +6113,7 @@ name = vqshrun_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vqshrun_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vqshrun_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 8, 9, 8, 9, 10, 11
b = 32, 36, 40, 44, 48, 52, 56, 60
n = 2
@@ -6574,7 +6574,7 @@ name = vrshrn_high
noq-n-suffix
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vrshrn_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vrshrn_n-noqself-::<N>, b}, {asc-0-out_len}
a = 0, 1, 8, 9, 8, 9, 10, 11
b = 32, 36, 40, 44, 48, 52, 56, 60
n = 2
@@ -6673,7 +6673,7 @@ generate uint16x8_t:uint16x8_t:uint8x8_t, uint32x4_t:uint32x4_t:uint16x4_t, uint
name = vrsubhn_high
no-q
multi_fn = vrsubhn-noqself-noext, x:in_t0, b, c
-multi_fn = simd_shuffle-out_len-!, a, x, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, x, {asc-0-out_len}
a = 1, 2, 0, 0, 0, 0, 0, 0
b = 1, 2, 3, 4, 5, 6, 7, 8
c = 1, 2, 3, 4, 5, 6, 7, 8
@@ -6841,7 +6841,7 @@ name = vshll_high_n
no-q
constn = N
multi_fn = static_assert-N-0-bits
-multi_fn = simd_shuffle-out_len-!, b:half, a, a, {asc-halflen-halflen}
+multi_fn = simd_shuffle!, b:half, a, a, {asc-halflen-halflen}
multi_fn = vshll_n-noqself-::<N>, b
a = 0, 0, 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8
n = 2
@@ -6889,7 +6889,7 @@ name = vshrn_high_n
no-q
constn = N
multi_fn = static_assert-N-1-halfbits
-multi_fn = simd_shuffle-out_len-!, a, {vshrn_n-noqself-::<N>, b}, {asc-0-out_len}
+multi_fn = simd_shuffle!, a, {vshrn_n-noqself-::<N>, b}, {asc-0-out_len}
a = 1, 2, 5, 6, 5, 6, 7, 8
b = 20, 24, 28, 32, 52, 56, 60, 64
n = 2
@@ -7087,8 +7087,8 @@ generate float32x2_t, float32x4_t
/// Transpose elements
name = vtrn
-multi_fn = simd_shuffle-in_len-!, a1:in_t, a, b, {transpose-1-in_len}
-multi_fn = simd_shuffle-in_len-!, b1:in_t, a, b, {transpose-2-in_len}
+multi_fn = simd_shuffle!, a1:in_t, a, b, {transpose-1-in_len}
+multi_fn = simd_shuffle!, b1:in_t, a, b, {transpose-2-in_len}
multi_fn = transmute, (a1, b1)
a = 0, 2, 2, 6, 2, 10, 6, 14, 2, 18, 6, 22, 10, 26, 14, 30
b = 1, 3, 3, 7, 3, 1, 7, 15, 3, 19, 7, 23, 1, 27, 15, 31
@@ -7104,8 +7104,8 @@ generate int32x2_t:int32x2_t:int32x2x2_t, uint32x2_t:uint32x2_t:uint32x2x2_t
/// Transpose elements
name = vtrn
-multi_fn = simd_shuffle-in_len-!, a1:in_t, a, b, {transpose-1-in_len}
-multi_fn = simd_shuffle-in_len-!, b1:in_t, a, b, {transpose-2-in_len}
+multi_fn = simd_shuffle!, a1:in_t, a, b, {transpose-1-in_len}
+multi_fn = simd_shuffle!, b1:in_t, a, b, {transpose-2-in_len}
multi_fn = transmute, (a1, b1)
a = 0., 2., 2., 6.
b = 1., 3., 3., 7.
@@ -7119,7 +7119,7 @@ generate float32x4_t:float32x4_t:float32x4x2_t
/// Transpose vectors
name = vtrn1
-multi_fn = simd_shuffle-in_len-!, a, b, {transpose-1-in_len}
+multi_fn = simd_shuffle!, a, b, {transpose-1-in_len}
a = 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
b = 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
validate 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29
@@ -7132,7 +7132,7 @@ generate int32x2_t, int64x2_t, uint32x2_t, uint64x2_t, poly64x2_t
/// Transpose vectors
name = vtrn1
-multi_fn = simd_shuffle-in_len-!, a, b, {transpose-1-in_len}
+multi_fn = simd_shuffle!, a, b, {transpose-1-in_len}
a = 0., 2., 4., 6., 8., 10., 12., 14.
b = 1., 3., 5., 7., 9., 11., 13., 15.
validate 0., 1., 4., 5., 8., 9., 12., 13.
@@ -7145,7 +7145,7 @@ generate float32x2_t, float64x2_t
/// Transpose vectors
name = vtrn2
-multi_fn = simd_shuffle-in_len-!, a, b, {transpose-2-in_len}
+multi_fn = simd_shuffle!, a, b, {transpose-2-in_len}
a = 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
b = 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
validate 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31
@@ -7158,7 +7158,7 @@ generate int32x2_t, int64x2_t, uint32x2_t, uint64x2_t, poly64x2_t
/// Transpose vectors
name = vtrn2
-multi_fn = simd_shuffle-in_len-!, a, b, {transpose-2-in_len}
+multi_fn = simd_shuffle!, a, b, {transpose-2-in_len}
a = 0., 2., 4., 6., 8., 10., 12., 14.
b = 1., 3., 5., 7., 9., 11., 13., 15.
validate 2., 3., 6., 7., 10., 11., 14., 15.
@@ -7171,8 +7171,8 @@ generate float32x2_t, float64x2_t
/// Zip vectors
name = vzip
-multi_fn = simd_shuffle-in_len-!, a0:in_t, a, b, {zip-1-in_len}
-multi_fn = simd_shuffle-in_len-!, b0:in_t, a, b, {zip-2-in_len}
+multi_fn = simd_shuffle!, a0:in_t, a, b, {zip-1-in_len}
+multi_fn = simd_shuffle!, b0:in_t, a, b, {zip-2-in_len}
multi_fn = transmute, (a0, b0)
a = 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
b = 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
@@ -7193,8 +7193,8 @@ generate poly8x16_t:poly8x16_t:poly8x16x2_t, poly16x8_t:poly16x8_t:poly16x8x2_t
/// Zip vectors
name = vzip
-multi_fn = simd_shuffle-in_len-!, a0:in_t, a, b, {zip-1-in_len}
-multi_fn = simd_shuffle-in_len-!, b0:in_t, a, b, {zip-2-in_len}
+multi_fn = simd_shuffle!, a0:in_t, a, b, {zip-1-in_len}
+multi_fn = simd_shuffle!, b0:in_t, a, b, {zip-2-in_len}
multi_fn = transmute, (a0, b0)
a = 1., 2., 3., 4.
b = 5., 6., 7., 8.
@@ -7209,7 +7209,7 @@ generate float32x4_t:float32x4_t:float32x4x2_t
/// Zip vectors
name = vzip1
-multi_fn = simd_shuffle-in_len-!, a, b, {zip-1-in_len}
+multi_fn = simd_shuffle!, a, b, {zip-1-in_len}
a = 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
b = 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
validate 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
@@ -7219,7 +7219,7 @@ generate int*_t, int64x2_t, uint*_t, uint64x2_t, poly8x8_t, poly8x16_t, poly16x4
/// Zip vectors
name = vzip1
-multi_fn = simd_shuffle-in_len-!, a, b, {zip-1-in_len}
+multi_fn = simd_shuffle!, a, b, {zip-1-in_len}
a = 0., 2., 4., 6., 8., 10., 12., 14.
b = 1., 3., 5., 7., 9., 11., 13., 15.
validate 0., 1., 2., 3., 4., 5., 6., 7.
@@ -7229,7 +7229,7 @@ generate float32x2_t, float32x4_t, float64x2_t
/// Zip vectors
name = vzip2
-multi_fn = simd_shuffle-in_len-!, a, b, {zip-2-in_len}
+multi_fn = simd_shuffle!, a, b, {zip-2-in_len}
a = 0, 16, 16, 18, 16, 18, 20, 22, 16, 18, 20, 22, 24, 26, 28, 30
b = 1, 17, 17, 19, 17, 19, 21, 23, 17, 19, 21, 23, 25, 27, 29, 31
validate 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
@@ -7239,7 +7239,7 @@ generate int*_t, int64x2_t, uint*_t, uint64x2_t, poly8x8_t, poly8x16_t, poly16x4
/// Zip vectors
name = vzip2
-multi_fn = simd_shuffle-in_len-!, a, b, {zip-2-in_len}
+multi_fn = simd_shuffle!, a, b, {zip-2-in_len}
a = 0., 8., 8., 10., 8., 10., 12., 14.
b = 1., 9., 9., 11., 9., 11., 13., 15.
validate 8., 9., 10., 11., 12., 13., 14., 15.
@@ -7249,8 +7249,8 @@ generate float32x2_t, float32x4_t, float64x2_t
/// Unzip vectors
name = vuzp
-multi_fn = simd_shuffle-in_len-!, a0:in_t, a, b, {unzip-1-in_len}
-multi_fn = simd_shuffle-in_len-!, b0:in_t, a, b, {unzip-2-in_len}
+multi_fn = simd_shuffle!, a0:in_t, a, b, {unzip-1-in_len}
+multi_fn = simd_shuffle!, b0:in_t, a, b, {unzip-2-in_len}
multi_fn = transmute, (a0, b0)
a = 1, 2, 2, 3, 2, 3, 3, 8, 2, 3, 3, 8, 3, 15, 8, 16
b = 2, 3, 3, 8, 3, 15, 8, 16, 3, 29, 8, 30, 15, 31, 16, 32
@@ -7267,8 +7267,8 @@ generate int32x2_t:int32x2_t:int32x2x2_t, uint32x2_t:uint32x2_t:uint32x2x2_t
/// Unzip vectors
name = vuzp
-multi_fn = simd_shuffle-in_len-!, a0:in_t, a, b, {unzip-1-in_len}
-multi_fn = simd_shuffle-in_len-!, b0:in_t, a, b, {unzip-2-in_len}
+multi_fn = simd_shuffle!, a0:in_t, a, b, {unzip-1-in_len}
+multi_fn = simd_shuffle!, b0:in_t, a, b, {unzip-2-in_len}
multi_fn = transmute, (a0, b0)
a = 1., 2., 2., 4.
b = 2., 6., 6., 8.
@@ -7283,7 +7283,7 @@ generate float32x4_t:float32x4_t:float32x4x2_t
/// Unzip vectors
name = vuzp1
-multi_fn = simd_shuffle-in_len-!, a, b, {unzip-1-in_len}
+multi_fn = simd_shuffle!, a, b, {unzip-1-in_len}
a = 1, 0, 2, 0, 2, 0, 3, 0, 2, 0, 3, 0, 7, 0, 8, 0
b = 2, 0, 3, 0, 7, 0, 8, 0, 13, 0, 14, 0, 15, 0, 16, 0
validate 1, 2, 2, 3, 2, 3, 7, 8, 2, 3, 7, 8, 13, 14, 15, 16
@@ -7296,7 +7296,7 @@ generate int32x2_t, int64x2_t, uint32x2_t, uint64x2_t, poly64x2_t
/// Unzip vectors
name = vuzp1
-multi_fn = simd_shuffle-in_len-!, a, b, {unzip-1-in_len}
+multi_fn = simd_shuffle!, a, b, {unzip-1-in_len}
a = 0., 8., 1., 9., 4., 12., 5., 13.
b = 1., 10., 3., 11., 6., 14., 7., 15.
validate 0., 1., 1., 3., 4., 5., 6., 7.
@@ -7309,7 +7309,7 @@ generate float32x2_t, float64x2_t
/// Unzip vectors
name = vuzp2
-multi_fn = simd_shuffle-in_len-!, a, b, {unzip-2-in_len}
+multi_fn = simd_shuffle!, a, b, {unzip-2-in_len}
a = 0, 17, 0, 18, 0, 18, 0, 19, 0, 18, 0, 19, 0, 23, 0, 24
b = 0, 18, 0, 19, 0, 23, 0, 24, 0, 29, 0, 30, 0, 31, 0, 32
validate 17, 18, 18, 19, 18, 19, 23, 24, 18, 19, 23, 24, 29, 30, 31, 32
@@ -7322,7 +7322,7 @@ generate int32x2_t, int64x2_t, uint32x2_t, uint64x2_t, poly64x2_t
/// Unzip vectors
name = vuzp2
-multi_fn = simd_shuffle-in_len-!, a, b, {unzip-2-in_len}
+multi_fn = simd_shuffle!, a, b, {unzip-2-in_len}
a = 0., 8., 1., 9., 4., 12., 5., 13.
b = 2., 9., 3., 11., 6., 14., 7., 15.
validate 8., 9., 9., 11., 12., 13., 14., 15.
@@ -7353,8 +7353,8 @@ generate uint16x8_t:uint8x8_t:uint8x8_t:uint16x8_t, uint32x4_t:uint16x4_t:uint16
/// Unsigned Absolute difference and Accumulate Long
name = vabal_high
no-q
-multi_fn = simd_shuffle8!, d:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
-multi_fn = simd_shuffle8!, e:uint8x8_t, c, c, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, d:uint8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, e:uint8x8_t, c, c, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = vabd_u8, d, e, f:uint8x8_t
multi_fn = simd_add, a, {simd_cast, f}
a = 9, 10, 11, 12, 13, 14, 15, 16
@@ -7368,8 +7368,8 @@ generate uint16x8_t:uint8x16_t:uint8x16_t:uint16x8_t
/// Unsigned Absolute difference and Accumulate Long
name = vabal_high
no-q
-multi_fn = simd_shuffle4!, d:uint16x4_t, b, b, [4, 5, 6, 7]
-multi_fn = simd_shuffle4!, e:uint16x4_t, c, c, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, d:uint16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, e:uint16x4_t, c, c, [4, 5, 6, 7]
multi_fn = vabd_u16, d, e, f:uint16x4_t
multi_fn = simd_add, a, {simd_cast, f}
a = 9, 10, 11, 12
@@ -7383,8 +7383,8 @@ generate uint32x4_t:uint16x8_t:uint16x8_t:uint32x4_t
/// Unsigned Absolute difference and Accumulate Long
name = vabal_high
no-q
-multi_fn = simd_shuffle2!, d:uint32x2_t, b, b, [2, 3]
-multi_fn = simd_shuffle2!, e:uint32x2_t, c, c, [2, 3]
+multi_fn = simd_shuffle!, d:uint32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, e:uint32x2_t, c, c, [2, 3]
multi_fn = vabd_u32, d, e, f:uint32x2_t
multi_fn = simd_add, a, {simd_cast, f}
a = 15, 16
@@ -7444,8 +7444,8 @@ generate int64x2_t:int32x2_t:int32x2_t:int64x2_t
/// Signed Absolute difference and Accumulate Long
name = vabal_high
no-q
-multi_fn = simd_shuffle8!, d:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
-multi_fn = simd_shuffle8!, e:int8x8_t, c, c, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, d:int8x8_t, b, b, [8, 9, 10, 11, 12, 13, 14, 15]
+multi_fn = simd_shuffle!, e:int8x8_t, c, c, [8, 9, 10, 11, 12, 13, 14, 15]
multi_fn = vabd_s8, d, e, f:int8x8_t
multi_fn = simd_cast, f:uint8x8_t, f
multi_fn = simd_add, a, {simd_cast, f}
@@ -7460,8 +7460,8 @@ generate int16x8_t:int8x16_t:int8x16_t:int16x8_t
/// Signed Absolute difference and Accumulate Long
name = vabal_high
no-q
-multi_fn = simd_shuffle4!, d:int16x4_t, b, b, [4, 5, 6, 7]
-multi_fn = simd_shuffle4!, e:int16x4_t, c, c, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, d:int16x4_t, b, b, [4, 5, 6, 7]
+multi_fn = simd_shuffle!, e:int16x4_t, c, c, [4, 5, 6, 7]
multi_fn = vabd_s16, d, e, f:int16x4_t
multi_fn = simd_cast, f:uint16x4_t, f
multi_fn = simd_add, a, {simd_cast, f}
@@ -7476,8 +7476,8 @@ generate int32x4_t:int16x8_t:int16x8_t:int32x4_t
/// Signed Absolute difference and Accumulate Long
name = vabal_high
no-q
-multi_fn = simd_shuffle2!, d:int32x2_t, b, b, [2, 3]
-multi_fn = simd_shuffle2!, e:int32x2_t, c, c, [2, 3]
+multi_fn = simd_shuffle!, d:int32x2_t, b, b, [2, 3]
+multi_fn = simd_shuffle!, e:int32x2_t, c, c, [2, 3]
multi_fn = vabd_s32, d, e, f:int32x2_t
multi_fn = simd_cast, f:uint32x2_t, f
multi_fn = simd_add, a, {simd_cast, f}
@@ -7490,10 +7490,10 @@ aarch64 = sabal
generate int64x2_t:int32x4_t:int32x4_t:int64x2_t
////////////////////
-// Singned saturating Absolute value
+// Signed saturating Absolute value
////////////////////
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
name = vqabs
a = MIN, MAX, -6, -5, -4, -3, -2, -1, 0, -127, 127, 1, 2, 3, 4, 5
validate MAX, MAX, 6, 5, 4, 3, 2, 1, 0, 127, 127, 1, 2, 3, 4, 5
@@ -7504,7 +7504,7 @@ link-arm = vqabs._EXT_
link-aarch64 = sqabs._EXT_
generate int*_t
-/// Singned saturating Absolute value
+/// Signed saturating Absolute value
name = vqabs
a = MIN, -7
validate MAX, 7
diff --git a/library/stdarch/crates/stdarch-gen/src/main.rs b/library/stdarch/crates/stdarch-gen/src/main.rs
index 750e88091..652aee88c 100644
--- a/library/stdarch/crates/stdarch-gen/src/main.rs
+++ b/library/stdarch/crates/stdarch-gen/src/main.rs
@@ -1,6 +1,7 @@
use self::Suffix::*;
use self::TargetFeature::*;
use std::env;
+use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, BufReader};
@@ -470,6 +471,199 @@ enum TargetFeature {
FTTS,
}
+impl TargetFeature {
+ /// A string for use with `#[target_feature(...)]`.
+ fn as_target_feature_arg_aarch64(&self) -> &str {
+ match *self {
+ // Features included with AArch64 NEON.
+ Self::Default => "neon",
+ Self::ArmV7 => "neon",
+ Self::Vfp4 => "neon",
+ Self::FPArmV8 => "neon",
+ // Optional features.
+ Self::AES => "neon,aes",
+ Self::FCMA => "neon,fcma",
+ Self::Dotprod => "neon,dotprod",
+ Self::I8MM => "neon,i8mm",
+ Self::SHA3 => "neon,sha3",
+ Self::RDM => "rdm",
+ Self::SM4 => "neon,sm4",
+ Self::FTTS => "neon,frintts",
+ }
+ }
+
+ /// A string for use with `#[simd_test(...)]` (or `is_aarch64_feature_detected!(...)`).
+ fn as_simd_test_arg_aarch64(&self) -> &str {
+ self.as_target_feature_arg_aarch64()
+ }
+
+ /// A string for use with `#[target_feature(...)]`.
+ fn as_target_feature_arg_arm(&self) -> &str {
+ match *self {
+ Self::Default => "neon,v7",
+ Self::ArmV7 => "neon,v7",
+ Self::Vfp4 => "neon,vfp4",
+ Self::FPArmV8 => "neon,fp-armv8,v8",
+ Self::AES => "neon,v8,aes",
+ Self::FCMA => "neon,v8,fcma",
+ Self::Dotprod => "neon,v8,dotprod",
+ Self::I8MM => "neon,v8,i8mm",
+ // Features not supported on 32-bit "arm".
+ Self::SHA3 => unimplemented!(),
+ Self::RDM => unimplemented!(),
+ Self::SM4 => unimplemented!(),
+ Self::FTTS => unimplemented!(),
+ }
+ }
+
+ /// A string for use with `#[simd_test(...)]` (or `is_arm_feature_detected!(...)`).
+ fn as_simd_test_arg_arm(&self) -> &str {
+ // TODO: Ideally, these would match the target_feature strings (as for AArch64).
+ match *self {
+ // We typically specify the "v7" or "v8" target_features for codegen, but we can't test
+ // them at runtime. However, in many cases we can test a specific named feature, and
+ // this is sufficient. For example, Neon always requires at least Armv7.
+
+ // "v7" extensions.
+ Self::Default => "neon",
+ Self::ArmV7 => "neon",
+
+ // TODO: We can specify these features for code generation, but they have no runtime
+ // detection, so we can't provide an accurate string for simd_test. For now, we use a
+ // common Armv8 feature as a proxy, but we should improve std_detect support here and
+ // update these accordingly.
+ Self::Vfp4 => "neon,crc",
+ Self::FPArmV8 => "neon,crc",
+
+ // "v8" extensions.
+ Self::AES => "neon,aes",
+ Self::FCMA => "neon,fcma",
+ Self::Dotprod => "neon,dotprod",
+ Self::I8MM => "neon,i8mm",
+
+ // Features not supported on 32-bit "arm".
+ Self::SHA3 => unimplemented!(),
+ Self::RDM => unimplemented!(),
+ Self::SM4 => unimplemented!(),
+ Self::FTTS => unimplemented!(),
+ }
+ }
+
+ fn attr(name: &str, value: impl fmt::Display) -> String {
+ format!(r#"#[{name}(enable = "{value}")]"#)
+ }
+
+ fn attr_for_arch(arch: &str, name: &str, value: impl fmt::Display) -> String {
+ format!(r#"#[cfg_attr(target_arch = "{arch}", {name}(enable = "{value}"))]"#)
+ }
+
+ /// Generate target_feature attributes for a test that will compile for both "arm" and "aarch64".
+ fn to_target_feature_attr_shared(&self) -> Lines {
+ let arm = self.as_target_feature_arg_arm().split(",");
+ let aarch64 = self.as_target_feature_arg_aarch64().split(",");
+
+ // Combine common features into an unconditional `target_feature` annotation, but guard
+ // others behind `cfg_attr`.
+ // TODO: It's much simpler to emit separate, guarded attributes for each architecture (as
+ // for `simd_test`). However, this has an unfortunate impact on documentation, since
+ // rustdoc can't currently look inside `cfg_attr` (stdarch/issues/1268).
+ let mut aarch64: Vec<_> = aarch64.collect();
+ let (both, arm): (Vec<_>, Vec<_>) = arm.partition(|v| aarch64.contains(v));
+ aarch64.retain(|v| !both.contains(v));
+ let mut lines = Vec::new();
+ if !both.is_empty() {
+ lines.push(Self::attr("target_feature", both.join(",")));
+ };
+ if !arm.is_empty() {
+ lines.push(Self::attr_for_arch("arm", "target_feature", arm.join(",")));
+ }
+ if !aarch64.is_empty() {
+ lines.push(Self::attr_for_arch(
+ "aarch64",
+ "target_feature",
+ aarch64.join(","),
+ ));
+ }
+ lines.into()
+ }
+
+ /// Generate a target_feature attribute for a test that will compile only for "aarch64".
+ fn to_target_feature_attr_aarch64(&self) -> Lines {
+ Lines::single(Self::attr(
+ "target_feature",
+ self.as_target_feature_arg_aarch64(),
+ ))
+ }
+
+ /// Generate a target_feature attribute for a test that will compile only for "arm".
+ fn to_target_feature_attr_arm(&self) -> Lines {
+ Lines::single(Self::attr(
+ "target_feature",
+ self.as_target_feature_arg_arm(),
+ ))
+ }
+
+ /// Generate simd_test attributes for a test that will compile for both "arm" and "aarch64".
+ fn to_simd_test_attr_shared(&self) -> Lines {
+ let arm = self.as_simd_test_arg_arm();
+ let aarch64 = self.as_simd_test_arg_aarch64();
+ if arm == aarch64 {
+ Lines::single(Self::attr("simd_test", arm))
+ } else {
+ vec![
+ Self::attr_for_arch("arm", "simd_test", arm),
+ Self::attr_for_arch("aarch64", "simd_test", aarch64),
+ ]
+ .into()
+ }
+ }
+
+ /// Generate a simd_test attribute for a test that will compile only for "aarch64".
+ fn to_simd_test_attr_aarch64(&self) -> Lines {
+ Lines::single(Self::attr("simd_test", self.as_simd_test_arg_aarch64()))
+ }
+}
+
+/// Complete lines of generated source.
+///
+/// This enables common generation tasks to be factored out without precluding basic
+/// context-specific formatting.
+///
+/// The convention in this generator is to prefix (not suffix) lines with a newline, so the
+/// implementation of `std::fmt::Display` behaves in the same way.
+struct Lines {
+ indent: usize,
+ lines: Vec<String>,
+}
+
+impl Lines {
+ fn indented(self, indent: usize) -> Self {
+ Self {
+ indent: indent + self.indent,
+ ..self
+ }
+ }
+
+ fn single(line: String) -> Self {
+ Self::from(vec![line])
+ }
+}
+
+impl From<Vec<String>> for Lines {
+ fn from(lines: Vec<String>) -> Self {
+ Self { indent: 0, lines }
+ }
+}
+
+impl std::fmt::Display for Lines {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {
+ for line in self.lines.iter() {
+ write!(f, "\n{:width$}{line}", "", width = self.indent)?;
+ }
+ Ok(())
+ }
+}
+
#[derive(Clone, Copy)]
enum Fntype {
Normal,
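The partition in `to_target_feature_attr_shared` is easiest to follow with a worked case. For `Self::AES`, the arm string is "neon,v8,aes" and the aarch64 string is "neon,aes": the shared features ("neon", "aes") collapse into one unconditional attribute, "v8" stays arm-only, and nothing is left aarch64-only, so the helper renders (via the `attr` and `attr_for_arch` helpers above):

#[target_feature(enable = "neon,aes")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]

`Lines`' `Display` impl then emits each of these prefixed with a newline plus the configured indent, which is why callers can splice the value directly into a `format!` template.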
@@ -1106,20 +1300,6 @@ fn gen_aarch64(
Rot => type_to_rot_suffix(current_name, type_to_suffix(out_t)),
RotLane => type_to_rot_suffix(current_name, &type_to_lane_suffixes(out_t, in_t[2], false)),
};
- let current_target = match target {
- Default => "neon",
- ArmV7 => "neon",
- Vfp4 => "neon",
- FPArmV8 => "neon",
- AES => "neon,aes",
- FCMA => "neon,fcma",
- Dotprod => "neon,dotprod",
- I8MM => "neon,i8mm",
- SHA3 => "neon,sha3",
- RDM => "rdm",
- SM4 => "neon,sm4",
- FTTS => "neon,frintts",
- };
let current_fn = if let Some(current_fn) = current_fn.clone() {
if link_aarch64.is_some() {
panic!("[{name}] Can't specify link and fn at the same time.")
@@ -1267,7 +1447,6 @@ fn gen_aarch64(
calls.push_str(&get_call(
&multi_fn[i],
current_name,
- &const_declare,
in_t,
out_t,
fixed,
@@ -1415,33 +1594,18 @@ fn gen_aarch64(
RDM => String::from("\n#[stable(feature = \"rdm_intrinsics\", since = \"1.62.0\")]"),
_ => String::new(),
};
- let function_doc = create_doc_string(current_comment, &name);
let function = format!(
r#"
-{}
-#[inline]
-#[target_feature(enable = "{}")]
-#[cfg_attr(test, assert_instr({}{}))]{}{}
-{}{{
- {}
+{function_doc}
+#[inline]{target_feature}
+#[cfg_attr(test, assert_instr({current_aarch64}{const_assert}))]{const_legacy}{stable}
+{fn_decl}{{
+ {call_params}
}}
"#,
- function_doc,
- current_target,
- current_aarch64,
- const_assert,
- const_legacy,
- stable,
- fn_decl,
- call_params
+ function_doc = create_doc_string(current_comment, &name),
+ target_feature = target.to_target_feature_attr_aarch64()
);
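With the named arguments above, a generated aarch64 intrinsic comes out shaped roughly like this (name, type, and instruction illustrative):

    /// Floating-point absolute value
    #[inline]
    #[target_feature(enable = "neon")]
    #[cfg_attr(test, assert_instr(fabs))]
    pub unsafe fn vabs_f64(a: float64x1_t) -> float64x1_t { ... }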
- let test_target = match target {
- I8MM => "neon,i8mm",
- SM4 => "neon,sm4",
- SHA3 => "neon,sha3",
- FTTS => "neon,frintts",
- _ => "neon",
- };
let test = match fn_type {
Fntype::Normal => gen_test(
&name,
@@ -1451,7 +1615,7 @@ fn gen_aarch64(
[type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])],
type_len(out_t),
para_num,
- test_target,
+ target.to_simd_test_attr_aarch64(),
),
Fntype::Load => gen_load_test(&name, in_t, &out_t, current_tests, type_len(out_t)),
Fntype::Store => gen_store_test(&name, in_t, &out_t, current_tests, type_len(in_t[1])),
@@ -1473,10 +1637,9 @@ fn gen_load_test(
type_len: usize,
) -> String {
let mut test = format!(
- r#"
- #[simd_test(enable = "neon")]
- unsafe fn test_{}() {{"#,
- name,
+ r#"{simd_test}
+ unsafe fn test_{name}() {{"#,
+ simd_test = Default.to_simd_test_attr_shared().indented(4)
);
for (a, b, _, n, e) in current_tests {
let a: Vec<String> = a.iter().take(type_len + 1).cloned().collect();
@@ -1571,10 +1734,9 @@ fn gen_store_test(
type_len: usize,
) -> String {
let mut test = format!(
- r#"
- #[simd_test(enable = "neon")]
- unsafe fn test_{}() {{"#,
- name,
+ r#"{simd_test}
+ unsafe fn test_{name}() {{"#,
+ simd_test = Default.to_simd_test_attr_shared().indented(4)
);
for (a, _, _, constn, e) in current_tests {
let a: Vec<String> = a.iter().take(type_len + 1).cloned().collect();
@@ -1639,14 +1801,10 @@ fn gen_test(
len_in: [usize; 3],
len_out: usize,
para_num: i32,
- target: &str,
+ attributes: Lines,
) -> String {
- let mut test = format!(
- r#"
- #[simd_test(enable = "{}")]
- unsafe fn test_{}() {{"#,
- target, name,
- );
+ let mut test = attributes.indented(4).to_string();
+ test.push_str(&format!("\n unsafe fn test_{name}() {{"));
for (a, b, c, n, e) in current_tests {
let a: Vec<String> = a.iter().take(len_in[0]).cloned().collect();
let b: Vec<String> = b.iter().take(len_in[1]).cloned().collect();
@@ -1833,34 +1991,6 @@ fn gen_arm(
let current_aarch64 = current_aarch64
.clone()
.unwrap_or_else(|| current_arm.to_string());
- let current_target_aarch64 = match target {
- Default => "neon",
- ArmV7 => "neon",
- Vfp4 => "neon",
- FPArmV8 => "neon",
- AES => "neon,aes",
- FCMA => "neon,fcma",
- Dotprod => "neon,dotprod",
- I8MM => "neon,i8mm",
- SHA3 => "neon,sha3",
- RDM => "rdm",
- SM4 => "neon,sm4",
- FTTS => "neon,frintts",
- };
- let current_target_arm = match target {
- Default => "v7",
- ArmV7 => "v7",
- Vfp4 => "vfp4",
- FPArmV8 => "fp-armv8,v8",
- AES => "aes,v8",
- FCMA => "v8", // v8.3a
- Dotprod => "v8", // v8.2a
- I8MM => "v8,i8mm",
- RDM => unreachable!(),
- SM4 => unreachable!(),
- SHA3 => unreachable!(),
- FTTS => unreachable!(),
- };
let current_fn = if let Some(current_fn) = current_fn.clone() {
if link_aarch64.is_some() || link_arm.is_some() {
panic!(
@@ -2182,7 +2312,6 @@ fn gen_arm(
calls.push_str(&get_call(
&multi_fn[i],
current_name,
- &const_declare,
in_t,
out_t,
fixed,
@@ -2378,33 +2507,22 @@ fn gen_arm(
let function_doc = create_doc_string(current_comment, &name);
format!(
r#"
-{}
+{function_doc}
#[inline]
-#[cfg(target_arch = "arm")]
-#[target_feature(enable = "neon,{}")]
-#[cfg_attr(test, assert_instr({}{}))]{}
-{}
+#[cfg(target_arch = "arm")]{target_feature_arm}
+#[cfg_attr(test, assert_instr({assert_arm}{const_assert}))]{const_legacy}
+{call_arm}
-{}
+{function_doc}
#[inline]
-#[cfg(target_arch = "aarch64")]
-#[target_feature(enable = "{}")]
-#[cfg_attr(test, assert_instr({}{}))]{}{}
-{}
+#[cfg(target_arch = "aarch64")]{target_feature_aarch64}
+#[cfg_attr(test, assert_instr({assert_aarch64}{const_assert}))]{const_legacy}{stable_aarch64}
+{call_aarch64}
"#,
- function_doc,
- current_target_arm,
- expand_intrinsic(&current_arm, in_t[1]),
- const_assert,
- const_legacy,
- call_arm,
- function_doc,
- current_target_aarch64,
- expand_intrinsic(&current_aarch64, in_t[1]),
- const_assert,
- const_legacy,
- stable_aarch64,
- call_aarch64,
+ target_feature_arm = target.to_target_feature_attr_arm(),
+ target_feature_aarch64 = target.to_target_feature_attr_aarch64(),
+ assert_arm = expand_intrinsic(&current_arm, in_t[1]),
+ assert_aarch64 = expand_intrinsic(&current_aarch64, in_t[1]),
)
} else {
let call = {
@@ -2444,36 +2562,20 @@ fn gen_arm(
RDM => String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"),
_ => String::new(),
};
- let function_doc = create_doc_string(current_comment, &name);
format!(
r#"
-{}
-#[inline]
-#[target_feature(enable = "{}")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "{}"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr({}{}))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({}{}))]{}{}
-{}
+{function_doc}
+#[inline]{target_feature}
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr({assert_arm}{const_assert}))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr({assert_aarch64}{const_assert}))]{const_legacy}{stable_aarch64}
+{call}
"#,
- function_doc,
- current_target_aarch64,
- current_target_arm,
- expand_intrinsic(&current_arm, in_t[1]),
- const_assert,
- expand_intrinsic(&current_aarch64, in_t[1]),
- const_assert,
- const_legacy,
- stable_aarch64,
- call,
+ function_doc = create_doc_string(current_comment, &name),
+ assert_arm = expand_intrinsic(&current_arm, in_t[1]),
+ assert_aarch64 = expand_intrinsic(&current_aarch64, in_t[1]),
+ target_feature = target.to_target_feature_attr_shared(),
)
};
- let test_target = match target {
- I8MM => "neon,i8mm",
- SM4 => "neon,sm4",
- SHA3 => "neon,sha3",
- FTTS => "neon,frintts",
- _ => "neon",
- };
let test = match fn_type {
Fntype::Normal => gen_test(
&name,
@@ -2483,7 +2585,7 @@ fn gen_arm(
[type_len(in_t[0]), type_len(in_t[1]), type_len(in_t[2])],
type_len(out_t),
para_num,
- test_target,
+ target.to_simd_test_attr_shared(),
),
Fntype::Load => gen_load_test(&name, in_t, &out_t, current_tests, type_len(out_t)),
Fntype::Store => gen_store_test(&name, in_t, &out_t, current_tests, type_len(in_t[1])),
@@ -2603,7 +2705,6 @@ fn expand_intrinsic(intr: &str, t: &str) -> String {
fn get_call(
in_str: &str,
current_name: &str,
- const_declare: &str,
in_t: &[&str; 3],
out_t: &str,
fixed: &Vec<String>,
@@ -2643,7 +2744,7 @@ fn get_call(
"halflen" => type_len(in_t[1]) / 2,
_ => 0,
};
- let mut s = format!("{const_declare} [");
+ let mut s = String::from("[");
for i in 0..len {
if i != 0 {
s.push_str(", ");
@@ -2674,7 +2775,7 @@ fn get_call(
if fn_name.starts_with("base") {
let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect();
assert_eq!(fn_format.len(), 3);
- let mut s = format!("<const {}: i32> [", &fn_format[2]);
+ let mut s = String::from("[");
let base_len = fn_format[1].parse::<usize>().unwrap();
for i in 0..type_len(in_t[1]) / base_len {
for j in 0..base_len {
@@ -2714,7 +2815,7 @@ fn get_call(
"in0_len" => type_len(in_t[0]),
_ => 0,
};
- let mut s = format!("{const_declare} [");
+ let mut s = String::from("[");
for i in 0..len {
if i != 0 {
s.push_str(", ");
@@ -2743,12 +2844,9 @@ fn get_call(
_ => 0,
};
if len == 0 {
- return format!(
- r#"static_assert!({} : i32 where {} == 0);"#,
- fn_format[2], fn_format[2]
- );
+ return format!(r#"static_assert!({} == 0);"#, fn_format[2]);
} else {
- return format!(r#"static_assert_imm{len}!({});"#, fn_format[2]);
+ return format!(r#"static_assert_uimm_bits!({}, {len});"#, fn_format[2]);
}
}
if fn_name.starts_with("static_assert") {
@@ -2768,14 +2866,11 @@ fn get_call(
fn_format[3].clone()
};
if lim1 == lim2 {
- return format!(
- r#"static_assert!({} : i32 where {} == {lim1});"#,
- fn_format[1], fn_format[1]
- );
+ return format!(r#"static_assert!({} == {lim1});"#, fn_format[1]);
} else {
return format!(
- r#"static_assert!({} : i32 where {} >= {lim1} && {} <= {lim2});"#,
- fn_format[1], fn_format[1], fn_format[1]
+ r#"static_assert!({} >= {lim1} && {} <= {lim2});"#,
+ fn_format[1], fn_format[1]
);
}
}
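Concretely, for a lane index `N` validated to 2 bits, the generator previously emitted `static_assert_imm2!(N);` and now emits `static_assert_uimm_bits!(N, 2);`; fixed-value and range checks likewise drop the old `N : i32 where ...` binder, e.g. `static_assert!(N == 0);`.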
@@ -2824,7 +2919,6 @@ fn get_call(
get_call(
&sub_call,
current_name,
- const_declare,
in_t,
out_t,
fixed,
@@ -2873,7 +2967,6 @@ fn get_call(
let sub_call = get_call(
&sub_fn[1..sub_fn.len() - 1],
current_name,
- const_declare,
in_t,
out_t,
fixed,
diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml
index 23bddeda6..ce5705c6e 100644
--- a/library/stdarch/crates/stdarch-test/Cargo.toml
+++ b/library/stdarch/crates/stdarch-test/Cargo.toml
@@ -18,7 +18,7 @@ cfg-if = "1.0"
# time, and we want to make updates to this explicit rather than automatically
# picking up updates which might break CI with new instruction names.
[target.'cfg(target_arch = "wasm32")'.dependencies]
-wasmprinter = "=0.2.24"
+wasmprinter = "=0.2.53"
[features]
default = []
diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs
index e0cf46cb4..61bbff2f9 100644
--- a/library/stdarch/crates/stdarch-test/src/lib.rs
+++ b/library/stdarch/crates/stdarch-test/src/lib.rs
@@ -115,7 +115,7 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) {
"cpuid" => 30,
// Apparently, on Windows, LLVM generates a bunch of
- // saves/restores of xmm registers around these intstructions,
+ // saves/restores of xmm registers around these instructions,
// which exceeds the limit of 20 below. As it seems dictated by
// Windows's ABI (I believe?), we probably can't do much
// about it.
diff --git a/library/stdarch/crates/stdarch-verify/x86-intel.xml b/library/stdarch/crates/stdarch-verify/x86-intel.xml
index 264ecee0e..c875b4809 100644
--- a/library/stdarch/crates/stdarch-verify/x86-intel.xml
+++ b/library/stdarch/crates/stdarch-verify/x86-intel.xml
@@ -350,7 +350,7 @@ zero_tileconfig_start()
<parameter type="__tile" varname="dst"/>
<parameter type="const void *" varname="base"/>
<parameter type="int" varname="stride" etype="UI32"/>
- <description>Load tile rows from memory specifieid by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig".</description>
+ <description>Load tile rows from memory specified by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig".</description>
<operation>start := tileconfig.startRow
IF start == 0 // not restarting, zero incoming state
tilezero(dst)
@@ -375,7 +375,7 @@ zero_tileconfig_start()
<parameter type="__tile" varname="dst"/>
<parameter type="const void *" varname="base"/>
<parameter type="int" varname="stride" etype="UI32"/>
- <description>Load tile rows from memory specifieid by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig". This intrinsic provides a hint to the implementation that the data will likely not be reused in the near future and the data caching can be optimized accordingly.</description>
+ <description>Load tile rows from memory specified by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig". This intrinsic provides a hint to the implementation that the data will likely not be reused in the near future and the data caching can be optimized accordingly.</description>
<operation>start := tileconfig.startRow
IF start == 0 // not restarting, zero incoming state
tilezero(dst)
@@ -409,7 +409,7 @@ zero_tileconfig_start()
<parameter type="__tile" varname="src" />
<parameter type="void *" varname="base"/>
<parameter type="int" varname="stride" etype="UI32"/>
- <description>Store the tile specified by "src" to memory specifieid by "base" address and "stride" using the tile configuration previously configured via "_tile_loadconfig".</description>
+ <description>Store the tile specified by "src" to memory specified by "base" address and "stride" using the tile configuration previously configured via "_tile_loadconfig".</description>
<operation>start := tileconfig.startRow
DO WHILE start &lt; src.rows
memptr := base + start * stride
@@ -102068,7 +102068,7 @@ dst[MAX:512] := 0
<category>Trigonometry</category>
<return type="__m512" varname="dst" etype="FP32"/>
<parameter type="__m512" varname="a" etype="FP32"/>
- <description>Compute the inverse hyperblic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians.</description>
+ <description>Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians.</description>
<operation>
FOR j := 0 to 15
i := j*32
@@ -124016,7 +124016,7 @@ ESAC
<return type="void"/>
<parameter type="char const*" varname="p" etype="UI8"/>
<parameter type="int" varname="i" etype="IMM" immwidth="2"/>
- <description>Fetch the line of data from memory that contains address "p" to a location in the cache heirarchy specified by the locality hint "i".</description>
+ <description>Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i".</description>
<instruction name="VPREFETCH0" form="m8"/>
<instruction name="VPREFETCH1" form="m8"/>
<instruction name="VPREFETCH2" form="m8"/>
@@ -127958,7 +127958,7 @@ dst[MAX:512] := 0
<parameter type="void const *" varname="mt" etype="FP64"/>
<parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elemenst are skipped when the corresponding mask bit is not set).</description>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before the first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
DEFINE UPCONVERT(addr, offset, convertTo) {
CASE conv OF
@@ -128000,7 +128000,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI32"/>
<parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_DOWNCONV_EPI32_NONE:
@@ -128065,7 +128065,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI32"/>
<parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresonding mask bit is not set).</description>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_DOWNCONV_EPI32_NONE:
@@ -128131,7 +128131,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI32"/>
<parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128193,7 +128193,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI32"/>
<parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are written to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are written to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128256,7 +128256,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI64"/>
<parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_UPCONV_EPI64_NONE:
@@ -128303,7 +128303,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI64"/>
<parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresonding mask bit is not set).</description>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_UPCONV_EPI64_NONE:
@@ -128351,7 +128351,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI64"/>
<parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128395,7 +128395,7 @@ dst[MAX:512] := 0
<parameter type="__m512i" varname="v1" etype="UI64"/>
<parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped whent he corresponding mask bit is not set).</description>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128440,7 +128440,7 @@ dst[MAX:512] := 0
<parameter type="__m512" varname="v1" etype="FP32"/>
<parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_UPCONV_PS_NONE:
@@ -128509,7 +128509,7 @@ dst[MAX:512] := 0
<parameter type="__m512" varname="v1" etype="FP32"/>
<parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_UPCONV_PS_NONE:
@@ -128579,7 +128579,7 @@ dst[MAX:512] := 0
<parameter type="__m512" varname="v1" etype="FP32"/>
<parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128645,7 +128645,7 @@ dst[MAX:512] := 0
<parameter type="__m512" varname="v1" etype="FP32"/>
<parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128712,7 +128712,7 @@ dst[MAX:512] := 0
<parameter type="__m512d" varname="v1" etype="FP64"/>
<parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_UPCONV_PD_NONE:
@@ -128759,7 +128759,7 @@ dst[MAX:512] := 0
<parameter type="__m512d" varname="v1" etype="FP64"/>
<parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
_MM_UPCONV_PD_NONE:
@@ -128807,7 +128807,7 @@ dst[MAX:512] := 0
<parameter type="__m512d" varname="v1" etype="FP64"/>
<parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -128851,7 +128851,7 @@ dst[MAX:512] := 0
<parameter type="__m512d" varname="v1" etype="FP64"/>
<parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
- <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
DEFINE DOWNCONVERT(element, convertTo) {
CASE convertTo OF
@@ -129460,7 +129460,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="UI32" memwidth="512"/>
<parameter type="__m512i" varname="v1" etype="UI32"/>
- <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt").</description>
+ <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
<operation>
storeOffset := 0
addr := mt
@@ -129484,7 +129484,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="UI32" memwidth="512"/>
<parameter type="__mmask16" varname="k" etype="MASK"/>
<parameter type="__m512i" varname="v1" etype="UI32"/>
- <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
addr := mt
@@ -129509,7 +129509,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
<parameter type="__m512i" varname="v1" etype="UI64"/>
- <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)).</description>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
<operation>
storeOffset := 0
foundNext64BytesBoundary := 0
@@ -129537,7 +129537,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
<parameter type="__mmask8" varname="k" etype="MASK"/>
<parameter type="__m512i" varname="v1" etype="UI64"/>
- <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
foundNext64BytesBoundary := 0
@@ -129566,7 +129566,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
<parameter type="__m512i" varname="v1" etype="UI64"/>
- <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt").</description>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
<operation>
storeOffset := 0
addr := mt
@@ -129590,7 +129590,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
<parameter type="__mmask8" varname="k" etype="MASK"/>
<parameter type="__m512i" varname="v1" etype="UI64"/>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
addr := mt
@@ -129615,7 +129615,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
<parameter type="__m512" varname="v1" etype="FP32"/>
- <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)).</description>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
<operation>
storeOffset := 0
foundNext64BytesBoundary := 0
@@ -129643,7 +129643,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
<parameter type="__mmask16" varname="k" etype="MASK"/>
<parameter type="__m512" varname="v1" etype="FP32"/>
- <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
foundNext64BytesBoundary := 0
@@ -129672,7 +129672,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
<parameter type="__m512" varname="v1" etype="FP32"/>
- <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt").</description>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
<operation>
storeOffset := 0
addr := mt
@@ -129696,7 +129696,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
<parameter type="__mmask16" varname="k" etype="MASK"/>
<parameter type="__m512" varname="v1" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
addr := mt
@@ -129721,7 +129721,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
<parameter type="__m512d" varname="v1" etype="FP64"/>
- <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)).</description>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
<operation>
storeOffset := 0
foundNext64BytesBoundary := 0
@@ -129749,7 +129749,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
<parameter type="__mmask8" varname="k" etype="MASK"/>
<parameter type="__m512d" varname="v1" etype="FP64"/>
- <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elemetns of the stream that map at or after the first 64-byte-aligned address following (m5-64)). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
foundNext64BytesBoundary := 0
@@ -129778,7 +129778,7 @@ ENDFOR
<return type="void"/>
<parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
<parameter type="__m512d" varname="v1" etype="FP64"/>
- <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt").</description>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
<operation>
storeOffset := 0
addr := mt
@@ -129802,7 +129802,7 @@ ENDFOR
<parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
<parameter type="__mmask8" varname="k" etype="MASK"/>
<parameter type="__m512d" varname="v1" etype="FP64"/>
- <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address follwing "mt"). Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
<operation>
storeOffset := 0
addr := mt
@@ -131673,7 +131673,7 @@ ENDFOR
<parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
<parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
<parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
- <description>Down-converts 8 packed double-precision (64-bit) floating-point elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". Elements are written to memory using writemask "k" (elements are not stored to memory when the corresponding mask bit is not set; the memory location is left unchagned). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <description>Down-converts 8 packed double-precision (64-bit) floating-point elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". Elements are written to memory using writemask "k" (elements are not stored to memory when the corresponding mask bit is not set; the memory location is left unchanged). "hint" indicates to the processor whether the data is non-temporal.</description>
<operation>
FOR j := 0 to 7
i := j*64
@@ -135543,7 +135543,7 @@ ENDFOR
<return type="void"/>
<parameter type="char const*" varname="p" etype="UI8"/>
<parameter type="int" varname="i" etype="IMM" immwidth="2"/>
- <description>Fetch the line of data from memory that contains address "p" to a location in the cache heirarchy specified by the locality hint "i".</description>
+ <description>Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i".</description>
<instruction name="PREFETCHWT1" form="m8" xed="PREFETCHWT1_MEMu8"/>
<header>xmmintrin.h</header>
</intrinsic>
@@ -136102,7 +136102,7 @@ MXCSR := a[31:0]
<return type="void"/>
<parameter type="char const*" varname="p" etype="UI8"/>
<parameter type="int" varname="i" etype="IMM" immwidth="2"/>
- <description>Fetch the line of data from memory that contains address "p" to a location in the cache heirarchy specified by the locality hint "i".</description>
+ <description>Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i".</description>
<instruction name="PREFETCHNTA" form="m8" xed="PREFETCHNTA_MEMmprefetch"/>
<instruction name="PREFETCHT0" form="m8" xed="PREFETCHT0_MEMmprefetch"/>
<instruction name="PREFETCHT1" form="m8" xed="PREFETCHT1_MEMmprefetch"/>
@@ -148134,4 +148134,4 @@ ENDFOR
<instruction name="XRSTORS64" form="m8" xed="XRSTORS64_MEMmxsave"/>
<header>immintrin.h</header>
</intrinsic>
-</intrinsics_list> \ No newline at end of file
+</intrinsics_list>
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
index 1ee68c854..7eee4ca23 100644
--- a/library/test/src/console.rs
+++ b/library/test/src/console.rs
@@ -41,6 +41,46 @@ impl<T: Write> Write for OutputLocation<T> {
}
}
+pub struct ConsoleTestDiscoveryState {
+ pub log_out: Option<File>,
+ pub tests: usize,
+ pub benchmarks: usize,
+ pub ignored: usize,
+ pub options: Options,
+}
+
+impl ConsoleTestDiscoveryState {
+ pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestDiscoveryState> {
+ let log_out = match opts.logfile {
+ Some(ref path) => Some(File::create(path)?),
+ None => None,
+ };
+
+ Ok(ConsoleTestDiscoveryState {
+ log_out,
+ tests: 0,
+ benchmarks: 0,
+ ignored: 0,
+ options: opts.options,
+ })
+ }
+
+ pub fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
+ where
+ S: AsRef<str>,
+ F: FnOnce() -> S,
+ {
+ match self.log_out {
+ None => Ok(()),
+ Some(ref mut o) => {
+ let msg = msg();
+ let msg = msg.as_ref();
+ o.write_all(msg.as_bytes())
+ }
+ }
+ }
+}
+
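A minimal usage sketch of the lazy logger (names hypothetical; the closure is only invoked when a logfile was configured via --logfile):

    let mut st = ConsoleTestDiscoveryState::new(&opts)?;
    st.tests += 1;
    st.write_log(|| format!("test {test_name}\n"))?;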
pub struct ConsoleTestState {
pub log_out: Option<File>,
pub total: usize,
@@ -138,53 +178,44 @@ impl ConsoleTestState {
// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
- let mut output = match term::stdout() {
+ let output = match term::stdout() {
None => OutputLocation::Raw(io::stdout().lock()),
Some(t) => OutputLocation::Pretty(t),
};
- let quiet = opts.format == OutputFormat::Terse;
- let mut st = ConsoleTestState::new(opts)?;
-
- let mut ntest = 0;
- let mut nbench = 0;
+ let mut out: Box<dyn OutputFormatter> = match opts.format {
+ OutputFormat::Pretty | OutputFormat::Junit => {
+ Box::new(PrettyFormatter::new(output, false, 0, false, None))
+ }
+ OutputFormat::Terse => Box::new(TerseFormatter::new(output, false, 0, false)),
+ OutputFormat::Json => Box::new(JsonFormatter::new(output)),
+ };
+ let mut st = ConsoleTestDiscoveryState::new(opts)?;
+ out.write_discovery_start()?;
for test in filter_tests(opts, tests).into_iter() {
use crate::TestFn::*;
- let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
+ let TestDescAndFn { desc, testfn } = test;
let fntype = match testfn {
StaticTestFn(..) | DynTestFn(..) => {
- ntest += 1;
+ st.tests += 1;
"test"
}
StaticBenchFn(..) | DynBenchFn(..) => {
- nbench += 1;
+ st.benchmarks += 1;
"benchmark"
}
};
- writeln!(output, "{name}: {fntype}")?;
- st.write_log(|| format!("{fntype} {name}\n"))?;
- }
+ st.ignored += if desc.ignore { 1 } else { 0 };
- fn plural(count: u32, s: &str) -> String {
- match count {
- 1 => format!("1 {s}"),
- n => format!("{n} {s}s"),
- }
+ out.write_test_discovered(&desc, fntype)?;
+ st.write_log(|| format!("{fntype} {}\n", desc.name))?;
}
- if !quiet {
- if ntest != 0 || nbench != 0 {
- writeln!(output)?;
- }
-
- writeln!(output, "{}, {}", plural(ntest, "test"), plural(nbench, "benchmark"))?;
- }
-
- Ok(())
+ out.write_discovery_finish(&st)
}
// Updates `ConsoleTestState` depending on result of the test execution.
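The reshaped `list_tests_console` above no longer formats anything itself: it counts into `ConsoleTestDiscoveryState` and delegates all rendering to an `OutputFormatter` chosen from `opts.format`. A standalone sketch of the same control flow, with invented stand-in names (`Test`, `Formatter`, `Plain`) rather than libtest's actual types:

use std::io::{self, Write};

struct Test {
    name: &'static str,
    is_bench: bool,
}

trait Formatter {
    fn discovery_start(&mut self) -> io::Result<()>;
    fn test_discovered(&mut self, t: &Test, kind: &str) -> io::Result<()>;
    fn discovery_finish(&mut self, tests: usize, benches: usize) -> io::Result<()>;
}

struct Plain<W: Write>(W);

impl<W: Write> Formatter for Plain<W> {
    fn discovery_start(&mut self) -> io::Result<()> {
        Ok(())
    }
    fn test_discovered(&mut self, t: &Test, kind: &str) -> io::Result<()> {
        writeln!(self.0, "{}: {kind}", t.name)
    }
    fn discovery_finish(&mut self, tests: usize, benches: usize) -> io::Result<()> {
        writeln!(self.0, "\n{tests} test(s), {benches} benchmark(s)")
    }
}

// Mirrors the shape of list_tests_console: count while delegating all
// rendering to the formatter, then let the formatter print the summary.
fn list(tests: &[Test], out: &mut dyn Formatter) -> io::Result<()> {
    let (mut nt, mut nb) = (0, 0);
    out.discovery_start()?;
    for t in tests {
        let kind = if t.is_bench { nb += 1; "benchmark" } else { nt += 1; "test" };
        out.test_discovered(t, kind)?;
    }
    out.discovery_finish(nt, nb)
}

fn main() -> io::Result<()> {
    let tests = [
        Test { name: "parses_empty_input", is_bench: false },
        Test { name: "bench_parse", is_bench: true },
    ];
    list(&tests, &mut Plain(io::stdout().lock()))
}

The payoff of routing listing through the trait is that `--list` now honors `--format` instead of always printing the hard-coded plain-text form.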
diff --git a/library/test/src/formatters/json.rs b/library/test/src/formatters/json.rs
index 95d2faf25..40976ec5e 100644
--- a/library/test/src/formatters/json.rs
+++ b/library/test/src/formatters/json.rs
@@ -2,7 +2,7 @@ use std::{borrow::Cow, io, io::prelude::Write};
use super::OutputFormatter;
use crate::{
- console::{ConsoleTestState, OutputLocation},
+ console::{ConsoleTestDiscoveryState, ConsoleTestState, OutputLocation},
test_result::TestResult,
time,
types::TestDesc,
@@ -60,6 +60,56 @@ impl<T: Write> JsonFormatter<T> {
}
impl<T: Write> OutputFormatter for JsonFormatter<T> {
+ fn write_discovery_start(&mut self) -> io::Result<()> {
+ self.writeln_message(&format!(r#"{{ "type": "suite", "event": "discovery" }}"#))
+ }
+
+ fn write_test_discovered(&mut self, desc: &TestDesc, test_type: &str) -> io::Result<()> {
+ let TestDesc {
+ name,
+ ignore,
+ ignore_message,
+ #[cfg(not(bootstrap))]
+ source_file,
+ #[cfg(not(bootstrap))]
+ start_line,
+ #[cfg(not(bootstrap))]
+ start_col,
+ #[cfg(not(bootstrap))]
+ end_line,
+ #[cfg(not(bootstrap))]
+ end_col,
+ ..
+ } = desc;
+
+ #[cfg(bootstrap)]
+ let source_file = "";
+ #[cfg(bootstrap)]
+ let start_line = 0;
+ #[cfg(bootstrap)]
+ let start_col = 0;
+ #[cfg(bootstrap)]
+ let end_line = 0;
+ #[cfg(bootstrap)]
+ let end_col = 0;
+
+ self.writeln_message(&format!(
+ r#"{{ "type": "{test_type}", "event": "discovered", "name": "{}", "ignore": {ignore}, "ignore_message": "{}", "source_path": "{}", "start_line": {start_line}, "start_col": {start_col}, "end_line": {end_line}, "end_col": {end_col} }}"#,
+ EscapedString(name.as_slice()),
+ ignore_message.unwrap_or(""),
+ EscapedString(source_file),
+ ))
+ }
+
+ fn write_discovery_finish(&mut self, state: &ConsoleTestDiscoveryState) -> io::Result<()> {
+ let ConsoleTestDiscoveryState { tests, benchmarks, ignored, .. } = state;
+
+ let total = tests + benchmarks;
+ self.writeln_message(&format!(
+ r#"{{ "type": "suite", "event": "completed", "tests": {tests}, "benchmarks": {benchmarks}, "total": {total}, "ignored": {ignored} }}"#
+ ))
+ }
+
fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
let shuffle_seed_json = if let Some(shuffle_seed) = shuffle_seed {
format!(r#", "shuffle_seed": {shuffle_seed}"#)
diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs
index 7a40ce33c..2e07ce3c0 100644
--- a/library/test/src/formatters/junit.rs
+++ b/library/test/src/formatters/junit.rs
@@ -3,7 +3,7 @@ use std::time::Duration;
use super::OutputFormatter;
use crate::{
- console::{ConsoleTestState, OutputLocation},
+ console::{ConsoleTestDiscoveryState, ConsoleTestState, OutputLocation},
test_result::TestResult,
time,
types::{TestDesc, TestType},
@@ -27,6 +27,18 @@ impl<T: Write> JunitFormatter<T> {
}
impl<T: Write> OutputFormatter for JunitFormatter<T> {
+ fn write_discovery_start(&mut self) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Not yet implemented!"))
+ }
+
+ fn write_test_discovered(&mut self, _desc: &TestDesc, _test_type: &str) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Not yet implemented!"))
+ }
+
+ fn write_discovery_finish(&mut self, _state: &ConsoleTestDiscoveryState) -> io::Result<()> {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Not yet implemented!"))
+ }
+
fn write_run_start(
&mut self,
_test_count: usize,
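JUnit XML has no obvious shape for a discovery listing, so the formatter fails fast rather than emit malformed output. A tiny sketch of how that error reaches the caller (free functions standing in for the formatter methods):

use std::io;

// Stand-in for JunitFormatter::write_discovery_start above.
fn write_discovery_start() -> io::Result<()> {
    Err(io::Error::new(io::ErrorKind::NotFound, "Not yet implemented!"))
}

fn list_tests() -> io::Result<()> {
    // As in list_tests_console, `?` propagates the error, so a JUnit
    // listing aborts before any test name is printed.
    write_discovery_start()?;
    Ok(())
}

fn main() {
    assert!(list_tests().is_err());
}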
diff --git a/library/test/src/formatters/mod.rs b/library/test/src/formatters/mod.rs
index cb67b6491..bc6ffebc1 100644
--- a/library/test/src/formatters/mod.rs
+++ b/library/test/src/formatters/mod.rs
@@ -1,7 +1,7 @@
use std::{io, io::prelude::Write};
use crate::{
- console::ConsoleTestState,
+ console::{ConsoleTestDiscoveryState, ConsoleTestState},
test_result::TestResult,
time,
types::{TestDesc, TestName},
@@ -18,6 +18,10 @@ pub(crate) use self::pretty::PrettyFormatter;
pub(crate) use self::terse::TerseFormatter;
pub(crate) trait OutputFormatter {
+ fn write_discovery_start(&mut self) -> io::Result<()>;
+ fn write_test_discovered(&mut self, desc: &TestDesc, test_type: &str) -> io::Result<()>;
+ fn write_discovery_finish(&mut self, state: &ConsoleTestDiscoveryState) -> io::Result<()>;
+
fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()>;
fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()>;
fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()>;
diff --git a/library/test/src/formatters/pretty.rs b/library/test/src/formatters/pretty.rs
index 247778e51..22654a340 100644
--- a/library/test/src/formatters/pretty.rs
+++ b/library/test/src/formatters/pretty.rs
@@ -3,7 +3,7 @@ use std::{io, io::prelude::Write};
use super::OutputFormatter;
use crate::{
bench::fmt_bench_samples,
- console::{ConsoleTestState, OutputLocation},
+ console::{ConsoleTestDiscoveryState, ConsoleTestState, OutputLocation},
term,
test_result::TestResult,
time,
@@ -181,6 +181,33 @@ impl<T: Write> PrettyFormatter<T> {
}
impl<T: Write> OutputFormatter for PrettyFormatter<T> {
+ fn write_discovery_start(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn write_test_discovered(&mut self, desc: &TestDesc, test_type: &str) -> io::Result<()> {
+ self.write_plain(format!("{}: {test_type}\n", desc.name))
+ }
+
+ fn write_discovery_finish(&mut self, state: &ConsoleTestDiscoveryState) -> io::Result<()> {
+ fn plural(count: usize, s: &str) -> String {
+ match count {
+ 1 => format!("1 {s}"),
+ n => format!("{n} {s}s"),
+ }
+ }
+
+ if state.tests != 0 || state.benchmarks != 0 {
+ self.write_plain("\n")?;
+ }
+
+ self.write_plain(format!(
+ "{}, {}\n",
+ plural(state.tests, "test"),
+ plural(state.benchmarks, "benchmark")
+ ))
+ }
+
fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
let noun = if test_count != 1 { "tests" } else { "test" };
let shuffle_seed_msg = if let Some(shuffle_seed) = shuffle_seed {
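The `plural` helper moved from `list_tests_console` into `write_discovery_finish` unchanged, so the summary line keeps its old shape. A quick check of its behavior (the helper is copied verbatim from the diff; the asserts are illustrative):

fn plural(count: usize, s: &str) -> String {
    match count {
        1 => format!("1 {s}"),
        n => format!("{n} {s}s"),
    }
}

fn main() {
    assert_eq!(plural(1, "test"), "1 test");
    assert_eq!(plural(0, "benchmark"), "0 benchmarks");
    // The summary line a non-empty listing ends with:
    println!("{}, {}", plural(3, "test"), plural(1, "benchmark"));
}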
diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs
index a431acfbc..2931ca6ea 100644
--- a/library/test/src/formatters/terse.rs
+++ b/library/test/src/formatters/terse.rs
@@ -3,7 +3,7 @@ use std::{io, io::prelude::Write};
use super::OutputFormatter;
use crate::{
bench::fmt_bench_samples,
- console::{ConsoleTestState, OutputLocation},
+ console::{ConsoleTestDiscoveryState, ConsoleTestState, OutputLocation},
term,
test_result::TestResult,
time,
@@ -167,6 +167,18 @@ impl<T: Write> TerseFormatter<T> {
}
impl<T: Write> OutputFormatter for TerseFormatter<T> {
+ fn write_discovery_start(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn write_test_discovered(&mut self, desc: &TestDesc, test_type: &str) -> io::Result<()> {
+ self.write_plain(format!("{}: {test_type}\n", desc.name))
+ }
+
+ fn write_discovery_finish(&mut self, _state: &ConsoleTestDiscoveryState) -> io::Result<()> {
+ Ok(())
+ }
+
fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
self.total_test_count = test_count;
let noun = if test_count != 1 { "tests" } else { "test" };
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 88d8e5fe9..e76d6716b 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -17,7 +17,6 @@
#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
#![feature(internal_output_capture)]
-#![feature(is_terminal)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
#![feature(panic_can_unwind)]
@@ -220,14 +219,14 @@ pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
struct FilteredTests {
tests: Vec<(TestId, TestDescAndFn)>,
- benchs: Vec<(TestId, TestDescAndFn)>,
+ benches: Vec<(TestId, TestDescAndFn)>,
next_id: usize,
}
impl FilteredTests {
fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
let test = TestDescAndFn { desc, testfn };
- self.benchs.push((TestId(self.next_id), test));
+ self.benches.push((TestId(self.next_id), test));
self.next_id += 1;
}
fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
@@ -246,7 +245,7 @@ impl FilteredTests {
self.add_test(desc, testfn);
}
fn total_len(&self) -> usize {
- self.tests.len() + self.benchs.len()
+ self.tests.len() + self.benches.len()
}
}
@@ -291,7 +290,7 @@ where
let tests_len = tests.len();
- let mut filtered = FilteredTests { tests: Vec::new(), benchs: Vec::new(), next_id: 0 };
+ let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };
for test in filter_tests(opts, tests) {
let mut desc = test.desc;
@@ -458,7 +457,7 @@ where
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
- for (id, b) in filtered.benchs {
+ for (id, b) in filtered.benches {
let event = TestEvent::TeWait(b.desc.clone());
notify_about_test_event(event)?;
let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
index 44776fb0a..5ffdbf73f 100644
--- a/library/test/src/tests.rs
+++ b/library/test/src/tests.rs
@@ -63,6 +63,16 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
name: StaticTestName("1"),
ignore: true,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -75,6 +85,16 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
name: StaticTestName("2"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -95,6 +115,16 @@ pub fn do_not_run_ignored_tests() {
name: StaticTestName("whatever"),
ignore: true,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -118,6 +148,16 @@ pub fn ignored_tests_result_in_ignored() {
name: StaticTestName("whatever"),
ignore: true,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -143,6 +183,16 @@ fn test_should_panic() {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::Yes,
compile_fail: false,
no_run: false,
@@ -168,6 +218,16 @@ fn test_should_panic_good_message() {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::YesWithMessage("error message"),
compile_fail: false,
no_run: false,
@@ -198,6 +258,16 @@ fn test_should_panic_bad_message() {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::YesWithMessage(expected),
compile_fail: false,
no_run: false,
@@ -232,6 +302,16 @@ fn test_should_panic_non_string_message_type() {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::YesWithMessage(expected),
compile_fail: false,
no_run: false,
@@ -260,6 +340,16 @@ fn test_should_panic_but_succeeds() {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic,
compile_fail: false,
no_run: false,
@@ -288,6 +378,16 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -325,6 +425,16 @@ fn time_test_failure_template(test_type: TestType) -> TestResult {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -364,6 +474,16 @@ fn typed_test_desc(test_type: TestType) -> TestDesc {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -476,6 +596,16 @@ pub fn exclude_should_panic_option() {
name: StaticTestName("3"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::Yes,
compile_fail: false,
no_run: false,
@@ -500,6 +630,16 @@ pub fn exact_filter_match() {
name: StaticTestName(name),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -591,6 +731,16 @@ fn sample_tests() -> Vec<TestDescAndFn> {
name: DynTestName((*name).clone()),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -720,6 +870,16 @@ pub fn test_bench_no_iter() {
name: StaticTestName("f"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -743,6 +903,16 @@ pub fn test_bench_iter() {
name: StaticTestName("f"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -759,6 +929,16 @@ fn should_sort_failures_before_printing_them() {
name: StaticTestName("a"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -769,6 +949,16 @@ fn should_sort_failures_before_printing_them() {
name: StaticTestName("b"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
@@ -816,6 +1006,16 @@ fn test_dyn_bench_returning_err_fails_when_run_as_test() {
name: StaticTestName("whatever"),
ignore: false,
ignore_message: None,
+ #[cfg(not(bootstrap))]
+ source_file: "",
+ #[cfg(not(bootstrap))]
+ start_line: 0,
+ #[cfg(not(bootstrap))]
+ start_col: 0,
+ #[cfg(not(bootstrap))]
+ end_line: 0,
+ #[cfg(not(bootstrap))]
+ end_col: 0,
should_panic: ShouldPanic::No,
compile_fail: false,
no_run: false,
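The repeated `#[cfg(not(bootstrap))]` blocks above exist because the stage0 compiler was built before `TestDesc` grew the new fields; `bootstrap` is a cfg set by rustc's own build system, not by Cargo. The same gating pattern in miniature, as a self-contained sketch:

// The field and every initializer must carry matching cfgs, or the
// stage0 build (where `bootstrap` is set) sees an initializer for a
// field that does not exist in its copy of the struct.
struct Desc {
    name: &'static str,
    #[cfg(not(bootstrap))]
    source_file: &'static str,
}

fn make() -> Desc {
    Desc {
        name: "whatever",
        #[cfg(not(bootstrap))]
        source_file: "",
    }
}

fn main() {
    println!("{}", make().name);
}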
diff --git a/library/test/src/types.rs b/library/test/src/types.rs
index 6f2e03309..8d4e204c8 100644
--- a/library/test/src/types.rs
+++ b/library/test/src/types.rs
@@ -119,6 +119,16 @@ pub struct TestDesc {
pub name: TestName,
pub ignore: bool,
pub ignore_message: Option<&'static str>,
+ #[cfg(not(bootstrap))]
+ pub source_file: &'static str,
+ #[cfg(not(bootstrap))]
+ pub start_line: usize,
+ #[cfg(not(bootstrap))]
+ pub start_col: usize,
+ #[cfg(not(bootstrap))]
+ pub end_line: usize,
+ #[cfg(not(bootstrap))]
+ pub end_col: usize,
pub should_panic: options::ShouldPanic,
pub compile_fail: bool,
pub no_run: bool,
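The new `TestDesc` fields record where the test item lives in source; how they get populated is outside this diff. A hypothetical sketch of capturing the same information with the stable `file!`/`line!`/`column!` macros — this is not libtest's actual macro expansion, just the idea:

struct Loc {
    source_file: &'static str,
    start_line: usize,
    start_col: usize,
}

macro_rules! here {
    () => {
        Loc {
            // These expand at the invocation site, which is what a
            // `#[test]`-style expansion would want to record.
            source_file: file!(),
            start_line: line!() as usize,
            start_col: column!() as usize,
        }
    };
}

fn main() {
    let loc = here!();
    println!("{}:{}:{}", loc.source_file, loc.start_line, loc.start_col);
}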
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index edc10aa39..b655bae96 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -54,6 +54,22 @@ cfg_if::cfg_if! {
}
}
+// This is the same as musl except that we default to using the system libunwind
+// instead of libgcc.
+#[cfg(target_env = "ohos")]
+cfg_if::cfg_if! {
+ if #[cfg(all(feature = "llvm-libunwind", feature = "system-llvm-libunwind"))] {
+ compile_error!("`llvm-libunwind` and `system-llvm-libunwind` cannot be enabled at the same time");
+ } else if #[cfg(feature = "llvm-libunwind")] {
+ #[link(name = "unwind", kind = "static", modifiers = "-bundle")]
+ extern "C" {}
+ } else {
+ #[link(name = "unwind", kind = "static", modifiers = "-bundle", cfg(target_feature = "crt-static"))]
+ #[link(name = "unwind", cfg(not(target_feature = "crt-static")))]
+ extern "C" {}
+ }
+}
+
#[cfg(target_os = "android")]
cfg_if::cfg_if! {
if #[cfg(feature = "llvm-libunwind")] {
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index eeeed3afc..f6a68073b 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -75,6 +75,9 @@ pub const unwinder_private_data_size: usize = 20;
#[cfg(all(target_arch = "hexagon", target_os = "linux"))]
pub const unwinder_private_data_size: usize = 35;
+#[cfg(target_arch = "loongarch64")]
+pub const unwinder_private_data_size: usize = 2;
+
#[repr(C)]
pub struct _Unwind_Exception {
pub exception_class: _Unwind_Exception_Class,